VirtualBox

Changeset 59149 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Dec 16, 2015 11:12:58 AM (9 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0: Fewer branches where applicable.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r59141 r59149  
    12571257
    12581258    /* Update number of guest MSRs to load/store across the world-switch. */
    1259     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);  AssertRCReturn(rc, rc);
    1260     rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);  AssertRCReturn(rc, rc);
     1259    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
     1260    rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
    12611261
    12621262    /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
    1263     rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);  AssertRCReturn(rc, rc);
     1263    rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);
     1264    AssertRCReturn(rc, rc);
    12641265
    12651266    /* Update the VCPU's copy of the MSR count. */
     
    24852486            val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;            /* Enable pause-loop exiting. */
    24862487
    2487             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
    2488             AssertRCReturn(rc, rc);
    2489 
    2490             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
     2488            rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
     2489            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
    24912490            AssertRCReturn(rc, rc);
    24922491        }
     
    2536 2535 #if 0
    25372536    /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
    2538     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);           AssertRCReturn(rc, rc);
    2539     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);            AssertRCReturn(rc, rc);
     2537    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
     2538    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
    25402539
    25412540    /*
     
    25442543     * We thus use the exception bitmap to control it rather than use both.
    25452544     */
    2546     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);       AssertRCReturn(rc, rc);
    2547     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);      AssertRCReturn(rc, rc);
     2545    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
     2546    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
    25482547
    25492548    /** @todo Explore possibility of using IO-bitmaps. */
    25502549    /* All IO & IOIO instructions cause VM-exits. */
    2551     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);           AssertRCReturn(rc, rc);
    2552     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);           AssertRCReturn(rc, rc);
     2550    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
     2551    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
    25532552
    25542553    /* Initialize the MSR-bitmap area. */
    2555     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);       AssertRCReturn(rc, rc);
    2556     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);       AssertRCReturn(rc, rc);
    2557     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  0);       AssertRCReturn(rc, rc);
     2554    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
     2555    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
     2556    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  0);
     2557    AssertRCReturn(rc, rc);
    2558 2558 #endif
    25592559
     
    25612561    Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
    25622562    Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));    /* Lower 4 bits MBZ. */
    2563     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    2564     AssertRCReturn(rc, rc);
    2565     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
     2563    rc  = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
     2564    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    25662565    AssertRCReturn(rc, rc);
    25672566
     
    2578 2577 #if 0
    25792578    /* Setup debug controls */
    2580     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);        /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
    2581     AssertRCReturn(rc, rc);
    2582     rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
     2579    rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);       /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
     2580    rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
    25832581    AssertRCReturn(rc, rc);
    2584 2582 #endif
     
    29242922
    29252923    /* Write these host selector fields into the host-state area in the VMCS. */
    2926     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);      AssertRCReturn(rc, rc);
    2927     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);      AssertRCReturn(rc, rc);
     2924    rc  = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
     2925    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
    2928 2926 #if HC_ARCH_BITS == 64
    2929     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);      AssertRCReturn(rc, rc);
    2930     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);      AssertRCReturn(rc, rc);
    2931     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);      AssertRCReturn(rc, rc);
    2932     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);      AssertRCReturn(rc, rc);
     2927    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
     2928    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
     2929    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
     2930    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
    2933 2931 #else
    29342932    NOREF(uSelDS);
     
    29372935    NOREF(uSelGS);
    2938 2936 #endif
    2939     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);      AssertRCReturn(rc, rc);
     2937    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
     2938    AssertRCReturn(rc, rc);
    29402939
    29412940    /*
     
    29482947    ASMGetGDTR(&Gdtr);
    29492948    ASMGetIDTR(&Idtr);
    2950     rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);      AssertRCReturn(rc, rc);
    2951     rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);      AssertRCReturn(rc, rc);
     2949    rc  = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
     2950    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
     2951    AssertRCReturn(rc, rc);
    29522952
    2953 2953 #if HC_ARCH_BITS == 64
     
    30303030    uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
    30313031    uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
    3032     rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);          AssertRCReturn(rc, rc);
    3033     rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);          AssertRCReturn(rc, rc);
     3032    rc  = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
     3033    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
     3034    AssertRCReturn(rc, rc);
    30343035
    30353036    /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
     
    30703071     * Host Sysenter MSRs.
    30713072     */
    3072     rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,        ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    3073     AssertRCReturn(rc, rc);
     3073    rc  = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,       ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    3074 3074 #if HC_ARCH_BITS == 32
    3075     rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,         ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    3076     AssertRCReturn(rc, rc);
    3077     rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,         ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     3075    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     3076    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,        ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    3078 3077 #else
    3079     rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP,         ASMRdMsr(MSR_IA32_SYSENTER_ESP));
    3080     AssertRCReturn(rc, rc);
    3081     rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP,         ASMRdMsr(MSR_IA32_SYSENTER_EIP));
     3078    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     3079    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP,        ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    3082 3080 #endif
    30833081    AssertRCReturn(rc, rc);
     
    35983596{
    35993597    int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
    3600     AssertRCReturn(rc, rc);
    3601     rc     = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
    3602     AssertRCReturn(rc, rc);
    3603     rc     = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
     3598    rc    |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
     3599    rc    |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
    36043600    AssertRCReturn(rc, rc);
    36053601    return rc;
     
    38553851                if (CPUMIsGuestInPAEModeEx(pMixedCtx))
    38563852                {
    3857                     rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);                          AssertRCReturn(rc, rc);
    3858                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);     AssertRCReturn(rc, rc);
    3859                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);     AssertRCReturn(rc, rc);
    3860                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);     AssertRCReturn(rc, rc);
    3861                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);     AssertRCReturn(rc, rc);
     3853                    rc  = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
     3854                    AssertRCReturn(rc, rc);
     3855                    rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
     3856                    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
     3857                    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
     3858                    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
     3859                    AssertRCReturn(rc, rc);
    38623860                }
    38633861
     
    43454343{
    43464344    int rc = VMXWriteVmcs32(idxSel,    pSelReg->Sel);       /* 16-bit guest selector field. */
    4347     AssertRCReturn(rc, rc);
    4348     rc     = VMXWriteVmcs32(idxLimit,  pSelReg->u32Limit);  /* 32-bit guest segment limit field. */
    4349     AssertRCReturn(rc, rc);
    4350     rc     = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);   /* Natural width guest segment base field.*/
     4345    rc    |= VMXWriteVmcs32(idxLimit,  pSelReg->u32Limit);  /* 32-bit guest segment limit field. */
     4346    rc    |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);   /* Natural width guest segment base field.*/
    43514347    AssertRCReturn(rc, rc);
    43524348
     
    45174513               || (u32AccessRights & RT_BIT(15)));              /* Granularity MB1. */
    45184514
    4519         rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR,         u16Sel);                AssertRCReturn(rc, rc);
    4520         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT,         u32Limit);              AssertRCReturn(rc, rc);
    4521         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE,          u64Base);               AssertRCReturn(rc, rc);
    4522         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);       AssertRCReturn(rc, rc);
     4515        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR,         u16Sel);
     4516        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT,         u32Limit);
     4517        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE,          u64Base);
     4518        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
     4519        AssertRCReturn(rc, rc);
    45234520
    45244521        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
     
    45314528    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
    45324529    {
    4533         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);        AssertRCReturn(rc, rc);
    4534         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);         AssertRCReturn(rc, rc);
     4530        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
     4531        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);
     4532        AssertRCReturn(rc, rc);
    45354533
    45364534        /* Validate. */
     
    45534551            u32Access = pMixedCtx->ldtr.Attr.u;
    45544552
    4555         rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR,         pMixedCtx->ldtr.Sel);         AssertRCReturn(rc, rc);
    4556         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pMixedCtx->ldtr.u32Limit);    AssertRCReturn(rc, rc);
    4557         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          pMixedCtx->ldtr.u64Base);     AssertRCReturn(rc, rc);
    4558         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);                   AssertRCReturn(rc, rc);
     4553        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR,         pMixedCtx->ldtr.Sel);
     4554        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pMixedCtx->ldtr.u32Limit);
     4555        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          pMixedCtx->ldtr.u64Base);
     4556        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
     4557        AssertRCReturn(rc, rc);
    45594558
    45604559        /* Validate. */
     
    45824581    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
    45834582    {
    4584         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);         AssertRCReturn(rc, rc);
    4585         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pMixedCtx->idtr.pIdt);          AssertRCReturn(rc, rc);
     4583        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
     4584        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pMixedCtx->idtr.pIdt);
     4585        AssertRCReturn(rc, rc);
    45864586
    45874587        /* Validate. */
     
    46304630        if (pVM->hm.s.fAllow64BitGuests)
    46314631        {
    4632             int rc = VINF_SUCCESS;
    4633             rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false, NULL);
    4634             rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false, NULL);
    4635             rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false, NULL);
    4636             rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
     4632            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false, NULL);
     4633            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false, NULL);
     4634            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false, NULL);
     4635            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
    46374636            AssertRCReturn(rc, rc);
    4638 4637 # ifdef LOG_ENABLED
     
    56255624    uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    56265625
    5627     int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    5628     AssertRCReturn(rc2, rc2);
    5629     rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    5630     AssertRCReturn(rc2, rc2);
     5626    int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);       AssertRCReturn(rc2, rc2);
     5627    rc2     = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);            AssertRCReturn(rc2, rc2);
    56315628
    56325629    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     
    58285825    {
    58295826        uint32_t uVal    = 0;
    5830         int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &uVal);
    5831         AssertRCReturn(rc, rc);
    5832 
    58335827        uint32_t uShadow = 0;
    5834         rc     = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
     5828        int rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &uVal);
     5829        rc     |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
    58355830        AssertRCReturn(rc, rc);
    58365831
     
    58665861        uint32_t uVal    = 0;
    58675862        uint32_t uShadow = 0;
    5868         rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &uVal);
    5869         AssertRCReturn(rc, rc);
    5870         rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
     5863        rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &uVal);
     5864        rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
    58715865        AssertRCReturn(rc, rc);
    58725866
     
    62406234            if (CPUMIsGuestInPAEModeEx(pMixedCtx))  /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
    62416235            {
    6242                 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);        AssertRCReturn(rc, rc);
    6243                 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);        AssertRCReturn(rc, rc);
    6244                 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);        AssertRCReturn(rc, rc);
    6245                 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);        AssertRCReturn(rc, rc);
     6236                rc  = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
     6237                rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
     6238                rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
     6239                rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
     6240                AssertRCReturn(rc, rc);
    62466241
    62476242                if (VMMRZCallRing3IsEnabled(pVCpu))
     
    63966391    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
    63976392    {
    6398         int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);   AssertRCReturn(rc, rc);
    6399         rc = VMXLOCAL_READ_SEG(CS, cs);                   AssertRCReturn(rc, rc);
    6400         rc = VMXLOCAL_READ_SEG(SS, ss);                   AssertRCReturn(rc, rc);
    6401         rc = VMXLOCAL_READ_SEG(DS, ds);                   AssertRCReturn(rc, rc);
    6402         rc = VMXLOCAL_READ_SEG(ES, es);                   AssertRCReturn(rc, rc);
    6403         rc = VMXLOCAL_READ_SEG(FS, fs);                   AssertRCReturn(rc, rc);
    6404         rc = VMXLOCAL_READ_SEG(GS, gs);                   AssertRCReturn(rc, rc);
     6393        int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     6394        AssertRCReturn(rc, rc);
     6395
     6396        rc  = VMXLOCAL_READ_SEG(CS, cs);
     6397        rc |= VMXLOCAL_READ_SEG(SS, ss);
     6398        rc |= VMXLOCAL_READ_SEG(DS, ds);
     6399        rc |= VMXLOCAL_READ_SEG(ES, es);
     6400        rc |= VMXLOCAL_READ_SEG(FS, fs);
     6401        rc |= VMXLOCAL_READ_SEG(GS, gs);
     6402        AssertRCReturn(rc, rc);
    64056403
    64066404        /* Restore segment attributes for real-on-v86 mode hack. */
     
    64506448    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
    64516449    {
    6452         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);
    6453         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);       AssertRCReturn(rc, rc);
     6450        rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
     6451        rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);      AssertRCReturn(rc, rc);
    64546452        pMixedCtx->gdtr.pGdt  = u64Val;
    64556453        pMixedCtx->gdtr.cbGdt = u32Val;
     
    64606458    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
    64616459    {
    6462         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);
    6463         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);       AssertRCReturn(rc, rc);
     6460        rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
     6461        rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);      AssertRCReturn(rc, rc);
    64646462        pMixedCtx->idtr.pIdt  = u64Val;
    64656463        pMixedCtx->idtr.cbIdt = u32Val;
     
    11330 11328 #if 0
    1133111329        int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    11332         AssertRCReturn(rc, rc);
    11333 11330 #else
    1133411331        /* Aggressive state sync. for now. */
     
    1159911596
    1160011597    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    11601     rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
    11602     rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
     11598    rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
     11599    rc    |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
    1160311600    AssertRCReturn(rc, rc);
    1160411601
     
    1197011967    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    1197111968    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    11972     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     11969    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1197311970    AssertRCReturn(rc, rc);
    1197411971
     
    1197711974    PVM pVM                              = pVCpu->CTX_SUFF(pVM);
    1197811975    VBOXSTRICTRC rcStrict;
    11979     rc  = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
     11976    rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
    1198011977    switch (uAccessType)
    1198111978    {
     
    1240912406    /* Aggressive state sync. for now. */
    1241012407    int rc  = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    12411     rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    12412     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     12408    rc     |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     12409    rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    12413 12410 #endif
    12414     rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12411    rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1241512412    AssertRCReturn(rc, rc);
    1241612413
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette