VirtualBox

Changeset 41328 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp:
May 16, 2012 10:57:35 AM
Author:
vboxsync
Message:

VMM/VMMR0/HWVMXR0: Code cleanup, style fixes, wrap to 130 columns.

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r41327 r41328  
    6161#endif
    6262
     63
    6364/*******************************************************************************
    6465*   Global Variables                                                           *
     
    7374#endif
    7475
     76
    7577/*******************************************************************************
    7678*   Local Functions                                                            *
    7779*******************************************************************************/
    78 static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx);
    7980static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu);
    8081static DECLCALLBACK(void) hmR0VmxSetupTLBVPID(PVM pVM, PVMCPU pVCpu);
     
    8586static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    8687static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
    87 
    88 
     88static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx);
     89
     90
     91/**
     92 * Updates the error from the VMCS to HWACCMCPU's lasterror record.
     93 *
     94 * @param    pVM            Pointer to the VM.
     95 * @param    pVCpu          Pointer to the VMCPU.
     96 * @param    rc             The error code.
     97 */
    8998static void hmR0VmxCheckError(PVM pVM, PVMCPU pVCpu, int rc)
    9099{
     
    99108}
    100109
     110
    101111/**
    102  * Sets up and activates VT-x on the current CPU
     112 * Sets up and activates VT-x on the current CPU.
    103113 *
    104114 * @returns VBox status code.
    105  * @param   pCpu            CPU info struct
    106  * @param   pVM             The VM to operate on. (can be NULL after a resume!!)
    107  * @param   pvCpuPage       Pointer to the global cpu page.
    108  * @param   HCPhysCpuPage   Physical address of the global cpu page.
     115 * @param   pCpu            Pointer to the CPU info struct.
     116 * @param   pVM             Pointer to the VM. (can be NULL after a resume!!)
     117 * @param   pvCpuPage       Pointer to the global CPU page.
     118 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    109119 */
    110120VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     
    112122    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    113123    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    114     NOREF(pCpu);
    115124
    116125    if (pVM)
     
    127136        return VERR_VMX_IN_VMX_ROOT_MODE;
    128137
    129     /* Make sure the VMX instructions don't cause #UD faults. */
    130     ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
    131 
    132     /* Enter VMX Root Mode. */
     138    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);    /* Make sure the VMX instructions don't cause #UD faults. */
     139
     140    /*
     141     * Enter VMX root mode.
     142     */
    133143    int rc = VMXEnable(HCPhysCpuPage);
    134144    if (RT_FAILURE(rc))
     
    156166}
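
For context, the enable path above is the canonical VMXON sequence from the Intel SDM. A minimal sketch of what VMXR0EnableCpu amounts to, where my_vmcs_revision_id() and my_vmxon() are hypothetical stand-ins for MSR_IA32_VMX_BASIC_INFO_VMCS_ID() and VMXEnable():

    /* Sketch only, not the tree's code. */
    static int sketchEnableVmxOnCpu(void *pvVmxonRegion, RTHCPHYS HCPhysVmxonRegion)
    {
        /* VMX instructions raise #UD unless CR4.VMXE is set. */
        ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);

        /* The VMXON region must start with the VMCS revision identifier
           reported by the IA32_VMX_BASIC capability MSR. */
        *(uint32_t *)pvVmxonRegion = my_vmcs_revision_id();     /* hypothetical */

        /* Enter VMX root mode; fails e.g. when VMX is disabled by the BIOS. */
        if (RT_FAILURE(my_vmxon(HCPhysVmxonRegion)))            /* hypothetical */
        {
            ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
            return VERR_VMX_VMXON_FAILED;                       /* assumed status code */
        }
        return VINF_SUCCESS;
    }
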
    157167
     168
    158169/**
    159  * Deactivates VT-x on the current CPU
     170 * Deactivates VT-x on the current CPU.
    160171 *
    161172 * @returns VBox status code.
    162  * @param   pCpu            CPU info struct
    163  * @param   pvCpuPage       Pointer to the global cpu page.
    164  * @param   HCPhysCpuPage   Physical address of the global cpu page.
     173 * @param   pCpu            Pointer to the CPU info struct.
     174 * @param   pvCpuPage       Pointer to the global CPU page.
     175 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    165176 */
    166177VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     
    182193}
    183194
     195
    184196/**
    185  * Does Ring-0 per VM VT-x init.
     197 * Does Ring-0 per VM VT-x initialization.
    186198 *
    187199 * @returns VBox status code.
    188  * @param   pVM         The VM to operate on.
     200 * @param   pVM         Pointer to the VM.
    189201 */
    190202VMMR0DECL(int) VMXR0InitVM(PVM pVM)
     
    309321}
    310322
     323
    311324/**
    312325 * Does Ring-0 per VM VT-x termination.
    313326 *
    314327 * @returns VBox status code.
    315  * @param   pVM         The VM to operate on.
     328 * @param   pVM         Pointer to the VM.
    316329 */
    317330VMMR0DECL(int) VMXR0TermVM(PVM pVM)
     
    379392}
    380393
     394
    381395/**
    382  * Sets up VT-x for the specified VM
     396 * Sets up VT-x for the specified VM.
    383397 *
    384398 * @returns VBox status code.
    385  * @param   pVM         The VM to operate on.
     399 * @param   pVM         Pointer to the VM.
    386400 */
    387401VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
     
    445459        *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
    446460
    447         /* Clear VM Control Structure. */
     461        /*
     462         * Clear and activate the VMCS.
     463         */
    448464        Log(("HCPhysVMCS  = %RHp\n", pVCpu->hwaccm.s.vmx.HCPhysVMCS));
    449465        rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     
    451467            goto vmx_end;
    452468
    453         /* Activate the VM Control Structure. */
    454469        rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    455470        if (RT_FAILURE(rc))
    456471            goto vmx_end;
    457472
    458         /* VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
     473        /*
     474         * VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
    459475         * Set required bits to one and zero according to the MSR capabilities.
    460476         */
    461         val  = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    462         /* External and non-maskable interrupts cause VM-exits. */
    463         val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
    464         /* enable the preemption timer. */
     477        val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
     478        val |=    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT      /* External interrupts */
     479                | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;         /* Non-maskable interrupts */
     480
     481        /*
     482         * Enable the VMX preemption timer.
     483         */
    465484        if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
    466485            val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
     
    470489        AssertRC(rc);
    471490
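
The "set required bits to one and zero according to the MSR capabilities" idiom above recurs for every control field. It comes from the VMX capability MSRs: the low dword holds the bits that must be 1 (what the tree calls disallowed0) and the high dword the bits that may be 1 (allowed1). A rough sketch for the pin-based controls, with my_rdmsr() as a hypothetical stand-in for the values VirtualBox caches in pVM->hwaccm.s.vmx.msr:

    uint64_t u64Caps  = my_rdmsr(0x481 /* IA32_VMX_PINBASED_CTLS */);   /* hypothetical wrapper */
    uint32_t fMustBe1 = (uint32_t)u64Caps;                              /* "disallowed0": may not be cleared */
    uint32_t fMayBe1  = (uint32_t)(u64Caps >> 32);                      /* "allowed1":    may be set */

    uint32_t val = fMustBe1                                             /* start with the mandatory bits */
                 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT
                 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
    val &= fMayBe1;                                                     /* drop anything unsupported */

Optional features (such as the preemption timer above) are checked against allowed1 before being requested, so the final masking never silently discards them.
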
    472         /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
     491        /*
     492         * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
    473493         * Set required bits to one and zero according to the MSR capabilities.
    474494         */
    475495        val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
    476496        /* Program which events cause VM-exits and which features we want to use. */
    477         val = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
    478                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
    479                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
    480                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
    481                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT
    482                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT
    483                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;    /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
     497        val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
     498               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
     499               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
     500               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
     501               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT
     502               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT
     503               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;     /* don't execute mwait or else we'll idle inside
     504                                                                      the guest (host thinks the cpu load is high) */
    484505
    485506        /* Without nested paging we should intercept invlpg and cr3 mov instructions. */
    486507        if (!pVM->hwaccm.s.fNestedPaging)
    487             val |=  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
    488                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    489                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
    490 
    491         /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */
     508        {
     509            val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
     510                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
     511                   | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     512        }
     513
     514        /*
     515         * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch
      516         * failure with an invalid control fields error (combined with some other exit reasons).
     517         */
    492518        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    493519        {
     
    519545        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    520546        {
    521             /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2
     547            /*
     548             * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2
    522549             * Set required bits to one and zero according to the MSR capabilities.
    523550             */
     
    545572        }
    546573
    547         /* VMX_VMCS_CTRL_CR3_TARGET_COUNT
     574        /*
     575         * VMX_VMCS_CTRL_CR3_TARGET_COUNT
    548576         * Set required bits to one and zero according to the MSR capabilities.
    549577         */
     
    551579        AssertRC(rc);
    552580
    553         /* Forward all exception except #NM & #PF to the guest.
     581        /*
      582         * Forward all exceptions except #NM & #PF to the guest.
    554583         * We always need to check pagefaults since our shadow page table can be out of sync.
    555          * And we always lazily sync the FPU & XMM state.
      584         * And we always lazily sync the FPU & XMM state.
    556585         */
    557586
     
    564593         */
    565594
    566         /* Don't filter page faults; all of them should cause a switch. */
     595        /*
      596         * Don't filter page faults; all of them should cause a world switch.
     597         */
    567598        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK, 0);
    568         rc |= VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0);
    569599        AssertRC(rc);
    570 
    571         /* Init TSC offset to zero. */
     600        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0);
     601        AssertRC(rc);
     602
    572603        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0);
    573604        AssertRC(rc);
    574 
    575605        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0);
    576606        AssertRC(rc);
    577 
    578607        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0);
    579608        AssertRC(rc);
    580609
    581         /* Set the MSR bitmap address. */
     610        /*
     611         * Set the MSR bitmap address.
     612         */
    582613        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    583614        {
     
    600631
    601632#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    602         /* Set the guest & host MSR load/store physical addresses. */
     633        /*
     634         * Set the guest & host MSR load/store physical addresses.
     635         */
    603636        Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
    604637        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     
    614647        rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
    615648        AssertRC(rc);
    616 
    617649        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
    618650        AssertRC(rc);
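
Each MSR load/store area referenced above is an array of 16-byte entries whose physical address and element count are written into the VMCS; per the SDM an entry is an MSR index, a reserved dword and the 64-bit value, which is the layout the tree's VMXMSR structure mirrors. As a sketch:

    /* Sketch of one entry in the VM-entry/VM-exit MSR areas (16 bytes). */
    typedef struct SKETCHVMXMSR
    {
        uint32_t u32IndexMSR;   /* MSR index, e.g. MSR_K6_EFER. */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* Value loaded/stored at the world switch. */
    } SKETCHVMXMSR;
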
     
    635667        AssertRC(rc);
    636668
    637         /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
     669        /*
      670         * Clear the VMCS, marking it inactive. Clear implementation-specific data and write the
      671         * VMCS data back to memory.
     672         */
    638673        rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    639674        AssertRC(rc);
    640675
    641         /* Configure the VMCS read cache. */
     676        /*
     677         * Configure the VMCS read cache.
     678         */
    642679        PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
    643680
     
    659696        VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_IDTR_BASE);
    660697
    661         VMX_SETUP_SELREG(ES, pCache);
    662         VMX_SETUP_SELREG(SS, pCache);
    663         VMX_SETUP_SELREG(CS, pCache);
    664         VMX_SETUP_SELREG(DS, pCache);
    665         VMX_SETUP_SELREG(FS, pCache);
    666         VMX_SETUP_SELREG(GS, pCache);
     698        VMX_SETUP_SELREG(ES,   pCache);
     699        VMX_SETUP_SELREG(SS,   pCache);
     700        VMX_SETUP_SELREG(CS,   pCache);
     701        VMX_SETUP_SELREG(DS,   pCache);
     702        VMX_SETUP_SELREG(FS,   pCache);
     703        VMX_SETUP_SELREG(GS,   pCache);
    667704        VMX_SETUP_SELREG(LDTR, pCache);
    668         VMX_SETUP_SELREG(TR, pCache);
    669 
    670         /* Status code VMCS reads. */
     705        VMX_SETUP_SELREG(TR,   pCache);
     706
     707        /*
     708         * Status code VMCS reads.
     709         */
    671710        VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_EXIT_REASON);
    672711        VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_VM_INSTR_ERROR);
     
    689728    } /* for each VMCPU */
    690729
    691     /* Choose the right TLB setup function. */
     730    /*
      731     * Set up the right TLB function based on CPU capabilities.
     732     */
    692733    if (pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID)
    693734        pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
     
    704745}
    705746
     747
    706748/**
    707  * Sets the permission bits for the specified MSR
     749 * Sets the permission bits for the specified MSR.
    708750 *
    709  * @param   pVCpu       The VMCPU to operate on.
    710  * @param   ulMSR       MSR value
    711  * @param   fRead       Reading allowed/disallowed
    712  * @param   fWrite      Writing allowed/disallowed
     751 * @param   pVCpu       Pointer to the VMCPU.
     752 * @param   ulMSR       The MSR value.
     753 * @param   fRead       Whether reading is allowed.
     754 * @param   fWrite      Whether writing is allowed.
    713755 */
    714756static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
     
    717759    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
    718760
    719     /* Layout:
     761    /*
     762     * Layout:
    720763     * 0x000 - 0x3ff - Low MSR read bits
    721764     * 0x400 - 0x7ff - High MSR read bits
     
    729772    }
    730773    else
    731     if (    ulMSR >= 0xC0000000
    732         &&  ulMSR <= 0xC0001FFF)
     774    if (   ulMSR >= 0xC0000000
     775        && ulMSR <= 0xC0001FFF)
    733776    {
    734777        /* AMD Sixth Generation x86 Processor MSRs */
     
    756799
    757800/**
    758  * Injects an event (trap or external interrupt)
     801 * Injects an event (trap or external interrupt).
    759802 *
    760803 * @returns VBox status code.  Note that it may return VINF_EM_RESET to
    761804 *          indicate a triple fault when injecting X86_XCPT_DF.
    762805 *
    763  * @param   pVM         The VM to operate on.
    764  * @param   pVCpu       The VMCPU to operate on.
    765  * @param   pCtx        CPU Context
    766  * @param   intInfo     VMX interrupt info
    767  * @param   cbInstr     Opcode length of faulting instruction
    768  * @param   errCode     Error code (optional)
     806 * @param   pVM         Pointer to the VM.
     807 * @param   pVCpu       Pointer to the VMCPU.
     808 * @param   pCtx        Pointer to the guest CPU Context.
     809 * @param   intInfo     VMX interrupt info.
     810 * @param   cbInstr     Opcode length of faulting instruction.
     811 * @param   errCode     Error code (optional).
    769812 */
    770813static int hmR0VmxInjectEvent(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
     
    779822#ifdef VBOX_STRICT
    780823    if (iGate == 0xE)
    781         LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x CR2=%RGv intInfo=%08x\n", iGate, (RTGCPTR)pCtx->rip, errCode, pCtx->cr2, intInfo));
    782     else
    783     if (iGate < 0x20)
    784         LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x\n", iGate, (RTGCPTR)pCtx->rip, errCode));
     824    {
     825        LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x CR2=%RGv intInfo=%08x\n", iGate,
     826                 (RTGCPTR)pCtx->rip, errCode, pCtx->cr2, intInfo));
     827    }
     828    else if (iGate < 0x20)
     829    {
     830        LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x\n", iGate, (RTGCPTR)pCtx->rip,
     831                 errCode));
     832    }
    785833    else
    786834    {
     
    800848        RTSEL    sel;
    801849
    802         /* Injecting events doesn't work right with real mode emulation.
     850        /*
     851         * Injecting events doesn't work right with real mode emulation.
    803852         * (#GP if we try to inject external hardware interrupts)
    804853         * Inject the interrupt or trap directly instead.
     
    808857        Log(("Manual interrupt/trap '%x' inject (real mode)\n", iGate));
    809858
    810         /* Check if the interrupt handler is present. */
     859        /*
     860         * Check if the interrupt handler is present.
     861         */
    811862        if (iGate * 4 + 3 > pCtx->idtr.cbIdt)
    812863        {
     
    835886            ip = pCtx->ip;
    836887
    837         /* Read the selector:offset pair of the interrupt handler. */
     888        /*
     889         * Read the selector:offset pair of the interrupt handler.
     890         */
    838891        GCPhysHandler = (RTGCPHYS)pCtx->idtr.pIdt + iGate * 4;
    839892        rc = PGMPhysSimpleReadGCPhys(pVM, &offset, GCPhysHandler,     sizeof(offset)); AssertRC(rc);
     
    842895        LogFlow(("IDT handler %04X:%04X\n", sel, offset));
    843896
    844         /* Construct the stack frame. */
    845         /** @todo should check stack limit. */
     897        /*
     898         * Construct the stack frame.
     899         */
     900        /** @todo Check stack limit. */
    846901        pCtx->sp -= 2;
    847902        LogFlow(("ss:sp %04X:%04X eflags=%x\n", pCtx->ss, pCtx->sp, pCtx->eflags.u));
     
    854909        rc = PGMPhysSimpleWriteGCPhys(pVM, pCtx->ssHid.u64Base + pCtx->sp, &ip, sizeof(ip)); AssertRC(rc);
    855910
    856         /* Update the CPU state for executing the handler. */
     911        /*
     912         * Update the CPU state for executing the handler.
     913         */
    857914        pCtx->rip           = offset;
    858915        pCtx->cs            = sel;
    859916        pCtx->csHid.u64Base = sel << 4;
    860         pCtx->eflags.u     &= ~(X86_EFL_IF|X86_EFL_TF|X86_EFL_RF|X86_EFL_AC);
     917        pCtx->eflags.u     &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
    861918
    862919        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
     
    864921    }
    865922
    866     /* Set event injection state. */
     923    /*
     924     * Set event injection state.
     925     */
    867926    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_IRQ_INFO, intInfo | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT));
    868 
    869927    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
    870928    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE, errCode);
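
For reference, the intInfo value written to VMX_VMCS_CTRL_ENTRY_IRQ_INFO packs the VM-entry interruption-information fields defined by the SDM; roughly, with u8Vector and fErrorCode standing in for the caller's values:

    uint32_t intInfo = u8Vector                                         /* bits  7:0  - vector  */
                     | (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT
                        << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT)       /* bits 10:8  - type    */
                     | (fErrorCode ? VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID : 0)
                                                                        /* bit  11    - deliver error code */
                     | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);   /* bit  31    - valid   */
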
     
    876934
    877935/**
    878  * Checks for pending guest interrupts and injects them
     936 * Checks for pending guest interrupts and injects them into the guest.
    879937 *
    880938 * @returns VBox status code.
    881  * @param   pVM         The VM to operate on.
    882  * @param   pVCpu       The VMCPU to operate on.
    883  * @param   pCtx        CPU Context
     939 * @param   pVM         Pointer to the VM.
     940 * @param   pVCpu       Pointer to the VMCPU.
     941 * @param   pCtx        Pointer to the guest CPU context.
    884942 */
    885943static int hmR0VmxCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, CPUMCTX *pCtx)
     
    887945    int rc;
    888946
    889     /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
     947    /*
     948     * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely).
     949     */
    890950    if (pVCpu->hwaccm.s.Event.fPending)
    891951    {
    892         Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
     952        Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hwaccm.s.Event.intInfo,
     953             pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
    893954        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
    894955        rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hwaccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);
     
    899960    }
    900961
    901     /* If an active trap is already pending, then we must forward it first! */
     962    /*
     963     * If an active trap is already pending, we must forward it first!
     964     */
    902965    if (!TRPMHasTrap(pVCpu))
    903966    {
     
    918981        }
    919982
    920         /* @todo SMI interrupts. */
    921 
    922         /* When external interrupts are pending, we should exit the VM when IF is set. */
     983        /** @todo SMI interrupts. */
     984
     985        /*
     986         * When external interrupts are pending, we should exit the VM when IF is set.
     987         */
    923988        if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    924989        {
     
    934999                /* else nothing to do but wait */
    9351000            }
    936             else
    937             if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     1001            else if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    9381002            {
    9391003                uint8_t u8Interrupt;
    9401004
    9411005                rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    942                 Log(("CPU%d: Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%RGv\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc, pCtx->cs, (RTGCPTR)pCtx->rip));
     1006                Log(("CPU%d: Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%RGv\n", pVCpu->idCpu,
     1007                     u8Interrupt, u8Interrupt, rc, pCtx->cs, (RTGCPTR)pCtx->rip));
    9431008                if (RT_SUCCESS(rc))
    9441009                {
     
    9621027    if (TRPMHasTrap(pVCpu))
    9631028    {
    964         uint8_t     u8Vector;
     1029        uint8_t u8Vector;
    9651030        rc = TRPMQueryTrapAll(pVCpu, &u8Vector, 0, 0, 0);
    9661031        AssertRC(rc);
     
    9681033#endif
    9691034
    970     if (    (pCtx->eflags.u32 & X86_EFL_IF)
     1035    if (   (pCtx->eflags.u32 & X86_EFL_IF)
    9711036        && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    9721037        && TRPMHasTrap(pVCpu)
     
    9781043        RTGCUINT    errCode;
    9791044
    980         /* If a new event is pending, then dispatch it now. */
     1045        /*
     1046         * If a new event is pending, dispatch it now.
     1047         */
    9811048        rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &errCode, 0);
    9821049        AssertRC(rc);
     
    9841051        Assert(enmType != TRPM_SOFTWARE_INT);
    9851052
    986         /* Clear the pending trap. */
     1053        /*
     1054         * Clear the pending trap.
     1055         */
    9871056        rc = TRPMResetTrap(pVCpu);
    9881057        AssertRC(rc);
     
    9931062        if (enmType == TRPM_TRAP)
    9941063        {
    995             switch (u8Vector) {
    996             case X86_XCPT_DF:
    997             case X86_XCPT_TS:
    998             case X86_XCPT_NP:
    999             case X86_XCPT_SS:
    1000             case X86_XCPT_GP:
    1001             case X86_XCPT_PF:
    1002             case X86_XCPT_AC:
    1003                 /* Valid error codes. */
    1004                 intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    1005                 break;
    1006             default:
    1007                 break;
     1064            switch (u8Vector)
     1065            {
     1066                case X86_XCPT_DF:
     1067                case X86_XCPT_TS:
     1068                case X86_XCPT_NP:
     1069                case X86_XCPT_SS:
     1070                case X86_XCPT_GP:
     1071                case X86_XCPT_PF:
     1072                case X86_XCPT_AC:
     1073                {
     1074                    /* Valid error codes. */
     1075                    intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
     1076                    break;
     1077                }
     1078
     1079                default:
     1080                    break;
    10081081            }
    1009             if (u8Vector == X86_XCPT_BP || u8Vector == X86_XCPT_OF)
     1082
     1083            if (   u8Vector == X86_XCPT_BP
     1084                || u8Vector == X86_XCPT_OF)
     1085            {
    10101086                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     1087            }
    10111088            else
    10121089                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    10231100}
    10241101
     1102
    10251103/**
    1026  * Save the host state
      1104 * Saves the host state into the VMCS.
    10271105 *
    10281106 * @returns VBox status code.
    1029  * @param   pVM         The VM to operate on.
    1030  * @param   pVCpu       The VMCPU to operate on.
     1107 * @param   pVM         Pointer to the VM.
     1108 * @param   pVCpu       Pointer to the VMCPU.
    10311109 */
    10321110VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
     
    10361114
    10371115    /*
    1038      * Host CPU Context
     1116     * Host CPU Context.
    10391117     */
    10401118    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     
    10491127        uint64_t    cr3;
    10501128
    1051         /* Control registers */
    1052         rc  = VMXWriteVMCS(VMX_VMCS_HOST_CR0,               ASMGetCR0());
     1129        /*
     1130         * Control registers.
     1131         */
     1132        rc  = VMXWriteVMCS(VMX_VMCS_HOST_CR0,           ASMGetCR0());
     1133        Log2(("VMX_VMCS_HOST_CR0 %08x\n",               ASMGetCR0()));
    10531134#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    10541135        if (VMX_IS_64BIT_HOST_MODE())
    10551136        {
    10561137            cr3 = hwaccmR0Get64bitCR3();
    1057             rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3,         cr3);
     1138            rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3,     cr3);
    10581139        }
    10591140        else
     
    10611142        {
    10621143            cr3 = ASMGetCR3();
    1063             rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3,           cr3);
    1064         }
    1065         rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4,               ASMGetCR4());
     1144            rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3,       cr3);
     1145        }
     1146        Log2(("VMX_VMCS_HOST_CR3 %08RX64\n",            cr3));
     1147        rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4,           ASMGetCR4());
     1148        Log2(("VMX_VMCS_HOST_CR4 %08x\n",               ASMGetCR4()));
    10661149        AssertRC(rc);
    1067         Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0()));
    1068         Log2(("VMX_VMCS_HOST_CR3 %08RX64\n", cr3));
    1069         Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4()));
    1070 
    1071         /* Selector registers. */
     1150
     1151        /*
     1152         * Selector registers.
     1153         */
    10721154#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    10731155        if (VMX_IS_64BIT_HOST_MODE())
     
    11111193        Log2(("VMX_VMCS_HOST_FIELD_TR %08x\n", ASMGetTR()));
    11121194
    1113         /* GDTR & IDTR */
     1195        /*
     1196         * GDTR & IDTR.
     1197         */
    11141198#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    11151199        if (VMX_IS_64BIT_HOST_MODE())
     
    11371221        }
    11381222
    1139         /* Save the base address of the TR selector. */
     1223        /*
     1224         * Save the base address of the TR selector.
     1225         */
    11401226        if (SelTR > gdtr.cbGdt)
    11411227        {
     
    11661252        }
    11671253
    1168         /* FS and GS base. */
     1254        /*
     1255         * FS base and GS base.
     1256         */
    11691257#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    11701258        if (VMX_IS_64BIT_HOST_MODE())
     
    11721260            Log2(("MSR_K8_FS_BASE = %RX64\n", ASMRdMsr(MSR_K8_FS_BASE)));
    11731261            Log2(("MSR_K8_GS_BASE = %RX64\n", ASMRdMsr(MSR_K8_GS_BASE)));
    1174             rc  = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE,     ASMRdMsr(MSR_K8_FS_BASE));
    1175             rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE,     ASMRdMsr(MSR_K8_GS_BASE));
     1262            rc  = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE,         ASMRdMsr(MSR_K8_FS_BASE));
     1263            rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE,         ASMRdMsr(MSR_K8_GS_BASE));
    11761264        }
    11771265#endif
    11781266        AssertRC(rc);
    11791267
    1180         /* Sysenter MSRs. */
     1268        /*
     1269         * Sysenter MSRs.
     1270         */
    11811271        /** @todo expensive!! */
    1182         rc  = VMXWriteVMCS(VMX_VMCS32_HOST_SYSENTER_CS,       ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    1183         Log2(("VMX_VMCS_HOST_SYSENTER_CS  %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));
     1272        rc  = VMXWriteVMCS(VMX_VMCS32_HOST_SYSENTER_CS,         ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
     1273        Log2(("VMX_VMCS_HOST_SYSENTER_CS  %08x\n",              ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));
    11841274#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    11851275        if (VMX_IS_64BIT_HOST_MODE())
     
    11921282        else
    11931283        {
    1194             rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP,  ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    1195             rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP,  ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    1196             Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n",     ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
    1197             Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n",     ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
     1284            rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP,      ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     1285            rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP,      ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     1286            Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n",         ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
     1287            Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n",         ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
    11981288        }
    11991289#elif HC_ARCH_BITS == 32
    1200         rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP,      ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    1201         rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP,      ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    1202         Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
    1203         Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
     1290        rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP,          ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     1291        rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP,          ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     1292        Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n",             ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
     1293        Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n",             ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
    12041294#else
    1205         Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
    1206         Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
    1207         rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP,      ASMRdMsr(MSR_IA32_SYSENTER_ESP));
    1208         rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP,      ASMRdMsr(MSR_IA32_SYSENTER_EIP));
     1295        Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX64\n",             ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
     1296        Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX64\n",             ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
      1297        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     1298        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP,        ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    12091299#endif
    12101300        AssertRC(rc);
    12111301
     1302
    12121303#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    1213         /* Store all host MSRs in the VM-Exit load area, so they will be reloaded after the world switch back to the host. */
     1304        /*
     1305         * Store all host MSRs in the VM-Exit load area, so they will be reloaded after
     1306         * the world switch back to the host.
     1307         */
    12141308        PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;
    12151309        unsigned idxMsr = 0;
    12161310
    1217         /* EFER MSR present? */
     1311        /*
      1312     * Check if the EFER MSR is present.
     1313         */
    12181314        if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
    12191315        {
     
    12311327            if (CPUMIsGuestInLongMode(pVCpu))
    12321328            {
    1233                 /* Must match the efer value in our 64 bits switcher. */
     1329                /* Must match the EFER value in our 64 bits switcher. */
    12341330                pMsr->u64Value    = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
    12351331            }
     
    12661362}
    12671363
     1364
    12681365/**
    12691366 * Loads the 4 PDPEs into the guest state when nested paging is used and the
    12701367 * guest operates in PAE mode.
    12711368 *
    1272  * @returns VINF_SUCCESS or fatal error.
    1273  * @param   pVCpu       The VMCPU to operate on.
    1274  * @param   pCtx        Guest context
     1369 * @returns VBox status code.
     1370 * @param   pVCpu       Pointer to the VMCPU.
     1371 * @param   pCtx        Pointer to the guest CPU context.
    12751372 */
    12761373static int hmR0VmxLoadPaePdpes(PVMCPU pVCpu, PCPUMCTX pCtx)
     
    12901387}
    12911388
     1389
    12921390/**
    12931391 * Saves the 4 PDPEs into the guest state when nested paging is used and the
    12941392 * guest operates in PAE mode.
    12951393 *
    1296  * @returns VINF_SUCCESS or fatal error.
    1297  * @param   pVCpu       The VMCPU to operate on.
    1298  * @param   pCtx        Guest context
     1394 * @returns VBox status code.
      1395 * @param   pVCpu       Pointer to the VMCPU.
     1396 * @param   pCtx        Pointer to the guest CPU context.
    12991397 *
    13001398 * @remarks Tell PGM about CR3 changes before calling this helper.
     
    13191417
    13201418/**
    1321  * Update the exception bitmap according to the current CPU state
      1419 * Updates the exception bitmap according to the current CPU state.
    13221420 *
    1323  * @param   pVM         The VM to operate on.
    1324  * @param   pVCpu       The VMCPU to operate on.
    1325  * @param   pCtx        Guest context
     1421 * @param   pVM         Pointer to the VM.
     1422 * @param   pVCpu       Pointer to the VMCPU.
     1423 * @param   pCtx        Pointer to the guest CPU context.
    13261424 */
    13271425static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    13301428    Assert(pCtx);
    13311429
    1332     /* Set up a mask for intercepting traps. */
     1430    /*
     1431     * Set up a mask for intercepting traps.
     1432     */
    13331433    /** @todo Do we really need to always intercept #DB? */
    13341434    u32TrapMask  =   RT_BIT(X86_XCPT_DB)
     
    13501450                   ;
    13511451
     1452    /*
     1453     * Without nested paging, #PF must be intercepted to implement shadow paging.
     1454     */
    13521455    /** @todo NP state won't change so maybe we should build the initial trap mask up front? */
    1353     /* Without nested paging, #PF must be intercepted to implement shadow paging. */
    13541456    if (!pVM->hwaccm.s.fNestedPaging)
    13551457        u32TrapMask |= RT_BIT(X86_XCPT_PF);
    13561458
    1357     /* Also catch floating point exceptions if we need to report them to the guest in a different way. */
     1459    /* Catch floating point exceptions if we need to report them to the guest in a different way. */
    13581460    if (!(pCtx->cr0 & X86_CR0_NE))
    1359     {
    13601461        u32TrapMask |= RT_BIT(X86_XCPT_MF);
    1361     }
    13621462
    13631463#ifdef VBOX_STRICT
     
    13651465#endif
    13661466
    1367     /* Intercept all exceptions in real mode as none of them can be injected directly (#GP otherwise). */
     1467    /*
     1468     * Intercept all exceptions in real mode as none of them can be injected directly (#GP otherwise).
     1469     */
    13681470    /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */
    13691471    if (    CPUMIsGuestInRealModeEx(pCtx)
    13701472        &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1473    {
    13711474        u32TrapMask |=   RT_BIT(X86_XCPT_DE)
    13721475                       | RT_BIT(X86_XCPT_DB)
     
    13871490                       | RT_BIT(X86_XCPT_XF)
    13881491                       ;
     1492    }
    13891493
    13901494    int rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, u32TrapMask);
     
    13921496}
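
Note the interplay with the VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK/MATCH fields zeroed in VMXR0SetupVM: per the SDM, a guest #PF exits exactly when the error-code test agrees with bit 14 of this bitmap, so with mask and match both zero, setting RT_BIT(X86_XCPT_PF) intercepts every page fault. As a sketch, where uErrCode, uPfecMask and uPfecMatch are hypothetical stand-ins for the #PF error code and the two VMCS fields:

    bool fBitmapSet  = RT_BOOL(u32TrapMask & RT_BIT(X86_XCPT_PF));  /* bit 14 of the bitmap    */
    bool fPfecAgrees = (uErrCode & uPfecMask) == uPfecMatch;        /* 0 & 0 == 0: always true */
    bool fCausesExit = fBitmapSet == fPfecAgrees;                   /* SDM rule for #PF exits  */
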
    13931497
     1498
    13941499/**
    1395  * Loads a minimal guest state
     1500 * Loads a minimal guest state.
    13961501 *
    13971502 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
    13981503 *
    1399  * @param   pVM         The VM to operate on.
    1400  * @param   pVCpu       The VMCPU to operate on.
    1401  * @param   pCtx        Guest context
     1504 * @param   pVM         Pointer to the VM.
     1505 * @param   pVCpu       Pointer to the VMCPU.
     1506 * @param   pCtx        Pointer to the guest CPU context.
    14021507 */
    14031508VMMR0DECL(void) VMXR0LoadMinimalGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    14081513    Assert(!(pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_ALL_GUEST));
    14091514
    1410     /* EIP, ESP and EFLAGS */
     1515    /*
     1516     * Load EIP, ESP and EFLAGS.
     1517     */
    14111518    rc  = VMXWriteVMCS64(VMX_VMCS64_GUEST_RIP, pCtx->rip);
    14121519    rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_RSP, pCtx->rsp);
    14131520    AssertRC(rc);
    14141521
    1415     /* Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. */
     1522    /*
     1523     * Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1.
     1524     */
    14161525    eflags      = pCtx->eflags;
    14171526    eflags.u32 &= VMX_EFLAGS_RESERVED_0;
    14181527    eflags.u32 |= VMX_EFLAGS_RESERVED_1;
    14191528
    1420     /* Real mode emulation using v86 mode. */
     1529    /*
      1530     * Check if we're emulating real mode using v86 mode.
     1531     */
    14211532    if (    CPUMIsGuestInRealModeEx(pCtx)
    14221533        &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     
    14311542}
    14321543
     1544
    14331545/**
    1434  * Loads the guest state
     1546 * Loads the guest state.
    14351547 *
    14361548 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
    14371549 *
    14381550 * @returns VBox status code.
    1439  * @param   pVM         The VM to operate on.
    1440  * @param   pVCpu       The VMCPU to operate on.
    1441  * @param   pCtx        Guest context
     1551 * @param   pVM         Pointer to the VM.
     1552 * @param   pVCpu       Pointer to the VMCPU.
     1553 * @param   pCtx        Pointer to the guest CPU context.
    14421554 */
    14431555VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    14461558    RTGCUINTPTR val;
    14471559
    1448     /* VMX_VMCS_CTRL_ENTRY_CONTROLS
     1560    /*
     1561     * VMX_VMCS_CTRL_ENTRY_CONTROLS
    14491562     * Set required bits to one and zero according to the MSR capabilities.
    14501563     */
    14511564    val  = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
    1452     /* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
     1565
     1566    /*
     1567     * Load guest debug controls (DR7 & IA32_DEBUGCTL_MSR).
     1568     * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs
     1569     */
    14531570    val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
    1454     /* 64 bits guest mode? */
     1571
    14551572    if (CPUMIsGuestInLongModeEx(pCtx))
    14561573        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
    14571574    /* else Must be zero when AMD64 is not available. */
    14581575
    1459     /* Mask away the bits that the CPU doesn't support */
     1576    /*
     1577     * Mask away the bits that the CPU doesn't support.
     1578     */
    14601579    val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
    14611580    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    14621581    AssertRC(rc);
    14631582
    1464     /* VMX_VMCS_CTRL_EXIT_CONTROLS
     1583    /*
     1584     * VMX_VMCS_CTRL_EXIT_CONTROLS
    14651585     * Set required bits to one and zero according to the MSR capabilities.
    14661586     */
    14671587    val  = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
    14681588
    1469     /* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
     1589    /*
      1590     * Save debug controls (DR7 & IA32_DEBUGCTL_MSR).
      1591     * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs.
     1592     */
    14701593    val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
    14711594
     
    14731596    if (VMX_IS_64BIT_HOST_MODE())
    14741597        val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
    1475     /* else: Must be zero when AMD64 is not available. */
     1598    /* else Must be zero when AMD64 is not available. */
    14761599#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    14771600    if (CPUMIsGuestInLongModeEx(pCtx))
     
    14811604#endif
    14821605    val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
    1483     /* Don't acknowledge external interrupts on VM-exit. */
     1606
     1607    /*
     1608     * Don't acknowledge external interrupts on VM-exit.
     1609     */
    14841610    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val);
    14851611    AssertRC(rc);
    14861612
    1487     /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
     1613    /*
     1614     * Guest CPU context: ES, CS, SS, DS, FS, GS.
     1615     */
    14881616    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    14891617    {
     
    14931621            if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
    14941622            {
    1495                 /* Correct weird requirements for switching to protected mode. */
     1623                /*
     1624                 * Correct weird requirements for switching to protected mode.
     1625                 */
    14961626                if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
    14971627                    &&  enmGuestMode >= PGMMODE_PROTECTED)
    14981628                {
    14991629#ifdef VBOX_WITH_REM
    1500                     /* Flush the recompiler code cache as it's not unlikely
    1501                      * the guest will rewrite code it will later execute in real
    1502                      * mode (OpenBSD 4.0 is one such example)
     1630                    /*
     1631                     * Flush the recompiler code cache as it's not unlikely the guest will rewrite code
     1632                     * it will later execute in real mode (OpenBSD 4.0 is one such example)
    15031633                     */
    15041634                    REMFlushTBs(pVM);
    15051635#endif
    15061636
    1507                     /* DPL of all hidden selector registers must match the current CPL (0). */
     1637                    /*
     1638                     * DPL of all hidden selector registers must match the current CPL (0).
     1639                     */
    15081640                    pCtx->csHid.Attr.n.u2Dpl  = 0;
    15091641                    pCtx->csHid.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_RW_ACC;
     
    15171649                pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
    15181650            }
    1519             else
    1520             /* VT-x will fail with a guest invalid state otherwise... (CPU state after a reset) */
    1521             if (   CPUMIsGuestInRealModeEx(pCtx)
    1522                 && pCtx->csHid.u64Base == 0xffff0000)
     1651            else if (   CPUMIsGuestInRealModeEx(pCtx)
     1652                     && pCtx->csHid.u64Base == 0xffff0000)
    15231653            {
     1654                /* VT-x will fail with a guest invalid state otherwise... (CPU state after a reset) */
    15241655                pCtx->csHid.u64Base = 0xf0000;
    15251656                pCtx->cs = 0xf000;
     
    15461677    }
    15471678
    1548     /* Guest CPU context: LDTR. */
     1679    /*
     1680     * Guest CPU context: LDTR.
     1681     */
    15491682    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    15501683    {
     
    15661699        AssertRC(rc);
    15671700    }
    1568     /* Guest CPU context: TR. */
     1701
     1702    /*
     1703     * Guest CPU context: TR.
     1704     */
    15691705    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    15701706    {
    1571         /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
     1707        /*
     1708         * Real mode emulation using v86 mode with CR4.VME (interrupt redirection
     1709         * using the int bitmap in the TSS).
     1710         */
    15721711        if (    CPUMIsGuestInRealModeEx(pCtx)
    15731712            &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     
    15751714            RTGCPHYS GCPhys;
    15761715
    1577             /* We convert it here every time as pci regions could be reconfigured. */
     1716            /* We convert it here every time as PCI regions could be reconfigured. */
    15781717            rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
    15791718            AssertRC(rc);
     
    16121751        AssertRC(rc);
    16131752    }
    1614     /* Guest CPU context: GDTR. */
     1753
     1754    /*
     1755     * Guest CPU context: GDTR.
     1756     */
    16151757    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    16161758    {
     
    16191761        AssertRC(rc);
    16201762    }
    1621     /* Guest CPU context: IDTR. */
     1763
     1764    /*
     1765     * Guest CPU context: IDTR.
     1766     */
    16221767    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    16231768    {
     
    16281773
    16291774    /*
    1630      * Sysenter MSRs
     1775     * Sysenter MSRs.
    16311776     */
    16321777    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)
     
    16381783    }
    16391784
    1640     /* Control registers */
     1785    /*
     1786     * Guest CPU context: Control registers.
     1787     */
    16411788    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    16421789    {
     
    16651812            if (CPUMIsGuestInPagedProtectedModeEx(pCtx))
    16661813            {
    1667                 /* Disable cr3 read/write monitoring as we don't need it for EPT. */
     1814                /* Disable CR3 read/write monitoring as we don't need it for EPT. */
    16681815                pVCpu->hwaccm.s.vmx.proc_ctls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    16691816                                                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
     
    16711818            else
    16721819            {
    1673                 /* Reenable cr3 read/write monitoring as our identity mapped page table is active. */
     1820                /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */
    16741821                pVCpu->hwaccm.s.vmx.proc_ctls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    16751822                                                 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     
    16891836        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_CR0,            val);
    16901837        Log2(("Guest CR0 %08x\n", val));
    1691         /* CR0 flags owned by the host; if the guests attempts to change them, then
    1692          * the VM will exit.
     1838
     1839        /*
      1840         * CR0 flags owned by the host; if the guest attempts to change them, then the VM will exit.
    16931841         */
    16941842        val =   X86_CR0_PE  /* Must monitor this bit (assumptions are made for real mode emulation) */
     
    16961844              | X86_CR0_PG  /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
    16971845              | X86_CR0_CD  /* Bit not restored during VM-exit! */
    1698               | X86_CR0_NW /* Bit not restored during VM-exit! */
     1846              | X86_CR0_NW  /* Bit not restored during VM-exit! */
    16991847              | X86_CR0_NE;
    17001848
    1701         /* When the guest's FPU state is active, then we no longer care about
    1702          * the FPU related bits.
     1849        /*
     1850         * When the guest's FPU state is active, then we no longer care about the FPU related bits.
    17031851         */
    17041852        if (CPUMIsGuestFPUStateActive(pVCpu) == false)
     
    17111859        AssertRC(rc);
    17121860    }
     1861
    17131862    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    17141863    {
    1715         /* CR4 */
    17161864        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW,   pCtx->cr4);
    17171865        Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
     
    17231871            switch(pVCpu->hwaccm.s.enmShadowMode)
    17241872            {
    1725             case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
    1726             case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
    1727             case PGMMODE_32_BIT:        /* 32-bit paging. */
    1728                 val &= ~X86_CR4_PAE;
    1729                 break;
    1730 
    1731             case PGMMODE_PAE:           /* PAE paging. */
    1732             case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
    1733                 /** Must use PAE paging as we could use physical memory > 4 GB */
    1734                 val |= X86_CR4_PAE;
    1735                 break;
    1736 
    1737             case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
    1738             case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
     1873                case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
     1874                case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
     1875                case PGMMODE_32_BIT:        /* 32-bit paging. */
     1876                    val &= ~X86_CR4_PAE;
     1877                    break;
     1878
     1879                case PGMMODE_PAE:           /* PAE paging. */
     1880                case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
     1881                    /** Must use PAE paging as we could use physical memory > 4 GB */
     1882                    val |= X86_CR4_PAE;
     1883                    break;
     1884
     1885                case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
     1886                case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
    17391887#ifdef VBOX_ENABLE_64_BITS_GUESTS
    1740                 break;
     1888                    break;
    17411889#else
    1742                 AssertFailed();
    1743                 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    1744 #endif
    1745             default:                   /* shut up gcc */
    1746                 AssertFailed();
    1747                 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     1890                    AssertFailed();
     1891                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     1892#endif
     1893                default:                   /* shut up gcc */
     1894                    AssertFailed();
     1895                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    17481896            }
    17491897        }
    1750         else
    1751         if (    !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    1752             &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     1898        else if (   !CPUMIsGuestInPagedProtectedModeEx(pCtx)
     1899                 && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
    17531900        {
    17541901            /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */
     
    17581905        }
    17591906
    1760         /* Turn off VME if we're in emulated real mode. */
     1907        /*
     1908         * Turn off VME if we're in emulated real mode.
     1909         */
    17611910        if (    CPUMIsGuestInRealModeEx(pCtx)
    17621911            &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1912        {
    17631913            val &= ~X86_CR4_VME;
     1914        }
    17641915
    17651916        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_CR4,            val);
    17661917        Log2(("Guest CR4 %08x\n", val));
    1767         /* CR4 flags owned by the host; if the guests attempts to change them, then
    1768          * the VM will exit.
     1918
     1919        /*
     1920         * CR4 flags owned by the host; if the guests attempts to change them, then the VM will exit.
    17691921         */
    17701922        val =   0
     
    18121964                RTGCPHYS GCPhys;
    18131965
    1814                 /* We convert it here every time as pci regions could be reconfigured. */
     1966                /* We convert it here every time as PCI regions could be reconfigured. */
    18151967                rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
    18161968                AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable));
    18171969
    1818                 /* We use our identity mapping page table here as we need to map guest virtual to guest physical addresses; EPT will
    1819                  * take care of the translation to host physical addresses.
     1970                /*
     1971                 * We use our identity mapping page table here as we need to map guest virtual to
     1972                 * guest physical addresses; EPT will take care of the translation to host physical addresses.
    18201973                 */
    18211974                val = GCPhys;
     
    18401993    }
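
A minimal standalone sketch of the identity-mapping idea behind pNonPagingModeEPTPageTable, assuming a classic 32-bit page directory with 4 MB pages (guest virtual == guest physical; EPT then translates guest physical to host physical):

    // Sketch only: fill an identity-mapping page directory with 4 MB pages.
    #include <cstdint>
    #include <cstdio>

    static const uint32_t PDE_P  = 1u << 0;     // present
    static const uint32_t PDE_RW = 1u << 1;     // writable
    static const uint32_t PDE_US = 1u << 2;     // user accessible
    static const uint32_t PDE_PS = 1u << 7;     // 4 MB page

    int main()
    {
        static uint32_t aPde[1024];             // one 32-bit page directory
        for (uint32_t i = 0; i < 1024; i++)
            aPde[i] = (i << 22) | PDE_P | PDE_RW | PDE_US | PDE_PS;  // GVA == GPA

        printf("PDE[1] maps GVA %#x onto GPA %#x\n", 1u << 22, aPde[1] & 0xffc00000u);
        return 0;
    }
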
    18411994
    1842     /* Debug registers. */
     1995    /*
     1996     * Guest CPU context: Debug registers.
     1997     */
    18431998    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    18441999    {
     
    18792034            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed);
    18802035
    1881             /* Disable drx move intercepts. */
     2036            /* Disable DRx move intercepts. */
    18822037            pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    18832038            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     
    18942049
    18952050        /** @todo do we really ever need this? */
    1896         rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS,         0);
     2051        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS,  0);
    18972052        AssertRC(rc);
    18982053    }
    18992054
    1900     /* 64 bits guest mode? */
     2055    /*
     2056     * 64-bit guest mode.
     2057     */
    19012058    if (CPUMIsGuestInLongModeEx(pCtx))
    19022059    {
     
    19942151    else
    19952152        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2153
    19962154    if (fOffsettedTsc)
    19972155    {
     
    20112169        {
    20122170            /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */
    2013             LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset, TMCpuTickGet(pVCpu)));
     2171            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
     2172                     pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset,
     2173                     TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset,
     2174                     TMCpuTickGet(pVCpu)));
    20142175            pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    20152176            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     
    20292190    pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
    20302191
    2031     /* Minimal guest state update (esp, eip, eflags mostly) */
     2192    /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */
    20322193    VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx);
    20332194    return rc;
    20342195}
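
A standalone sketch of the TSC offsetting decision above: the guest sees host TSC plus the offset, and if that could drop below the last TSC value the guest has already observed, RDTSC has to be trapped and emulated instead (names are illustrative):

    // Sketch only: choose between TSC offsetting and RDTSC exiting.
    #include <cstdint>
    #include <cstdio>

    struct TscPlan { bool fTrapRdtsc; uint64_t u64Offset; };

    static TscPlan PlanTsc(uint64_t u64HostTsc, uint64_t u64Offset, uint64_t u64LastSeen)
    {
        TscPlan Plan = { false, u64Offset };
        if (u64HostTsc + u64Offset < u64LastSeen)   // guest TSC would appear to go backwards
            Plan.fTrapRdtsc = true;                 // fall back to RDTSC exits
        return Plan;
    }

    int main()
    {
        TscPlan Plan = PlanTsc(1000 /*host*/, 0 /*offset*/, 5000 /*last seen*/);
        printf("trap rdtsc: %d\n", Plan.fTrapRdtsc);    // -> 1
        return 0;
    }
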
    20352196
     2197
    20362198/**
    2037  * Syncs back the guest state
     2199 * Syncs back the guest state from VMCS.
    20382200 *
    20392201 * @returns VBox status code.
    2040  * @param   pVM         The VM to operate on.
    2041  * @param   pVCpu       The VMCPU to operate on.
    2042  * @param   pCtx        Guest context
     2202 * @param   pVM         Pointer to the VM.
     2203 * @param   pVCpu       Pointer to the VMCPU.
     2204 * @param   pCtx        Pointer the guest CPU context.
    20432205 */
    20442206DECLINLINE(int) VMXR0SaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    20482210    int         rc;
    20492211
    2050     /* Let's first sync back eip, esp, and eflags. */
     2212    /* First sync back EIP, ESP, and EFLAGS. */
    20512213    rc = VMXReadCachedVMCS(VMX_VMCS64_GUEST_RIP,              &val);
    20522214    AssertRC(rc);
     
    20822244    CPUMSetGuestCR4(pVCpu, val);
    20832245
    2084     /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
    2085     /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
    2086     if (    pVM->hwaccm.s.fNestedPaging
    2087         &&  CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */
     2246    /*
     2247     * No reason to sync back the CRx registers. They can't be changed by the guest unless in
     2248     * the nested paging case where CR3 & CR4 can be changed by the guest.
     2249     */
     2250    if (   pVM->hwaccm.s.fNestedPaging
     2251        && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */
    20882252    {
    20892253        PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     
    21032267    }
    21042268
    2105     /* Sync back DR7 here. */
     2269    /* Sync back DR7. */
    21062270    VMXReadCachedVMCS(VMX_VMCS64_GUEST_DR7, &val);
    21072271    pCtx->dr[7] = val;
     
    21152279    VMX_READ_SELREG(GS, gs);
    21162280
    2117     /*
    2118      * System MSRs
    2119      */
     2281    /* System MSRs */
    21202282    VMXReadCachedVMCS(VMX_VMCS32_GUEST_SYSENTER_CS,    &val);
    21212283    pCtx->SysEnter.cs       = val;
     
    21802342        case MSR_K6_EFER:
    21812343            /* EFER can't be changed without causing a VM-exit. */
    2182 //            Assert(pCtx->msrEFER == pMsr->u64Value);
     2344            /* Assert(pCtx->msrEFER == pMsr->u64Value); */
    21832345            break;
    21842346        default:
     
    21912353}
    21922354
     2355
    21932356/**
    2194  * Dummy placeholder
     2357 * Dummy placeholder for TLB flush handling before VM-entry. Used in the case
     2358 * where neither EPT nor VPID is supported by the CPU.
    21952359 *
    2196  * @param   pVM         The VM to operate on.
    2197  * @param   pVCpu       The VMCPU to operate on.
     2360 * @param   pVM         Pointer to the VM.
     2361 * @param   pVCpu       Pointer to the VMCPU.
    21982362 */
    21992363static DECLCALLBACK(void) hmR0VmxSetupTLBDummy(PVM pVM, PVMCPU pVCpu)
     
    22102374 * Setup the tagged TLB for EPT+VPID.
    22112375 *
    2212  * @param    pVM        The VM to operate on.
    2213  * @param    pVCpu      The VMCPU to operate on.
     2376 * @param    pVM        Pointer to the VM.
     2377 * @param    pVCpu      Pointer to the VMCPU.
    22142378 */
    22152379static DECLCALLBACK(void) hmR0VmxSetupTLBBoth(PVM pVM, PVMCPU pVCpu)
     
    23332497 *
    23342498 * @returns VBox status code.
    2335  * @param   pVM         The VM to operate on.
    2336  * @param   pVCpu       The VMCPU to operate on.
     2499 * @param   pVM         Pointer to the VM.
     2500 * @param   pVCpu       Pointer to the VMCPU.
    23372501 */
    23382502static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu)
     
    23432507    Assert(!pVM->hwaccm.s.vmx.fVPID);
    23442508
    2345     /* Deal with tagged TLBs if VPID or EPT is supported. */
    23462509    pCpu = HWACCMR0GetCurrentCpu();
    2347     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    2348     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
     2510
     2511    /*
      2512     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
      2513     * This can happen both for start & resume due to long jumps back to ring-3.
      2514     * The TLB flush count shouldn't really change in this EPT-only case.
     2515     */
    23492516    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2350             /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
    23512517        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    23522518    {
    2353         /* Force a TLB flush on VM entry. */
    23542519        pVCpu->hwaccm.s.fForceTLBFlush = true;
    23552520    }
    23562521
    2357     /* Check for tlb shootdown flushes. */
     2522    /*
     2523     * Check for explicit TLB shootdown flushes.
     2524     */
    23582525    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    23592526        pVCpu->hwaccm.s.fForceTLBFlush = true;
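
A standalone sketch of the flush decision in this EPT-only path (struct and field names are illustrative, not the real hwaccm members):

    // Sketch only: when a TLB flush must be forced before VM-entry.
    #include <cstdint>
    #include <cstdio>

    struct VCpuTlbState { uint32_t idLastCpu; uint32_t cFlushes; };
    struct HostCpuState { uint32_t idCpu;     uint32_t cFlushes; };

    static bool MustFlushTlb(const VCpuTlbState &VCpu, const HostCpuState &Cpu, bool fShootdown)
    {
        if (VCpu.idLastCpu != Cpu.idCpu)    // first run or migrated to another host CPU
            return true;
        if (VCpu.cFlushes != Cpu.cFlushes)  // someone else flushed this CPU's TLB meanwhile
            return true;
        if (fShootdown)                     // explicit VMCPU_FF_TLB_FLUSH request
            return true;
        return false;
    }

    int main()
    {
        VCpuTlbState VCpu = { 0 /*idLastCpu*/, 7 /*cFlushes*/ };
        HostCpuState Cpu  = { 1 /*idCpu*/,     7 /*cFlushes*/ };
        printf("flush: %d\n", MustFlushTlb(VCpu, Cpu, false));  // migrated -> 1
        return 0;
    }
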
     
    23902557
    23912558/**
    2392  * Setup the tagged TLB for VPID
     2559 * Setup the tagged TLB for VPID.
    23932560 *
    23942561 * @returns VBox status code.
    2395  * @param   pVM         The VM to operate on.
    2396  * @param   pVCpu       The VMCPU to operate on.
     2562 * @param   pVM         Pointer to the VM.
     2563 * @param   pVCpu       Pointer to the VMCPU.
    23972564 */
    23982565static DECLCALLBACK(void) hmR0VmxSetupTLBVPID(PVM pVM, PVMCPU pVCpu)
     
    24032570    Assert(!pVM->hwaccm.s.fNestedPaging);
    24042571
    2405     /* Deal with tagged TLBs if VPID or EPT is supported. */
    24062572    pCpu = HWACCMR0GetCurrentCpu();
    2407     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    2408     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
     2573
     2574    /*
      2575     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
     2576     * This can happen both for start & resume due to long jumps back to ring-3.
     2577     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
     2578     * so we cannot reuse the current ASID anymore.
     2579     */
    24092580    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2410             /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
    24112581        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    24122582    {
     
    24152585    }
    24162586
    2417     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    2418 
    2419     /* Check for tlb shootdown flushes. */
     2587    /*
     2588     * Check for explicit TLB shootdown flushes.
      2589     */
    24202590    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    24212591        pVCpu->hwaccm.s.fForceTLBFlush = true;
    24222592
    2423     /* Make sure we flush the TLB when required. */
     2593    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
     2594
    24242595    if (pVCpu->hwaccm.s.fForceTLBFlush)
    24252596    {
     
    24852656 *
    24862657 * @returns VBox status code.
    2487  * @param   pVM         The VM to operate on.
    2488  * @param   pVCpu       The VMCPU to operate on.
    2489  * @param   pCtx        Guest context
     2658 * @param   pVM         Pointer to the VM.
     2659 * @param   pVCpu       Pointer to the VMCPU.
     2660 * @param   pCtx        Pointer to the guest CPU context.
    24902661 */
    24912662VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    25182689    Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || (pVCpu->hwaccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC));
    25192690
    2520     /* Check if we need to use TPR shadowing. */
     2691    /*
     2692     * Check if we need to use TPR shadowing.
     2693     */
    25212694    if (    CPUMIsGuestInLongModeEx(pCtx)
    25222695        || (   ((pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || pVM->hwaccm.s.fTRPPatchingAllowed)
     
    25352708        rc2 = VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val2);
    25362709        AssertRC(rc2);
    2537         Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val2));
     2710        Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n",  val2));
    25382711
    25392712        /* allowed zero */
     
    25472720        rc2 = VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val2);
    25482721        AssertRC(rc2);
    2549         Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val2));
    2550 
    2551         /* Must be set according to the MSR, but can be cleared in case of EPT. */
     2722        Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n",  val2));
     2723
     2724        /*
     2725         * Must be set according to the MSR, but can be cleared if nested paging is used.
     2726         */
    25522727        if (pVM->hwaccm.s.fNestedPaging)
     2728        {
    25532729            val2 |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
    25542730                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    25552731                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     2732        }
    25562733
    25572734        /* allowed zero */
     
    25652742        rc2 = VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val2);
    25662743        AssertRC(rc2);
    2567         Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val2));
     2744        Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n",  val2));
    25682745
    25692746        /* allowed zero */
     
    25772754        rc2 = VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val2);
    25782755        AssertRC(rc2);
    2579         Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val2));
     2756        Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n",  val2));
    25802757
    25812758        /* allowed zero */
     
    25942771#endif
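
The VBOX_STRICT block above validates each control word against its capability MSR: the MSR's low dword holds the allowed-0 settings (bits that must be 1) and its high dword the allowed-1 settings (bits that may be 1). A standalone sketch of that check:

    // Sketch only: validate a VMX execution control against a capability MSR.
    #include <cstdint>
    #include <cstdio>

    static bool IsVmxControlValid(uint64_t u64CapMsr, uint32_t u32Ctrl)
    {
        uint32_t const fMustBeOne = (uint32_t)u64CapMsr;          // allowed 0-settings
        uint32_t const fMayBeOne  = (uint32_t)(u64CapMsr >> 32);  // allowed 1-settings
        if ((u32Ctrl & fMustBeOne) != fMustBeOne)
            return false;                                         // a required bit is clear
        if (u32Ctrl & ~fMayBeOne)
            return false;                                         // a reserved bit is set
        return true;
    }

    int main()
    {
        uint64_t const u64Cap = UINT64_C(0x0000001700000016);     // example capability value
        printf("valid: %d\n", IsVmxControlValid(u64Cap, 0x16));   // required bits set    -> 1
        printf("valid: %d\n", IsVmxControlValid(u64Cap, 0x1e));   // bit 3 is not allowed -> 0
        return 0;
    }
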
    25952772
    2596     /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
     2773    /*
     2774     * We can jump to this point to resume execution after determining that a VM-exit is innocent.
    25972775     */
    25982776ResumeExecution:
     
    26062784    Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx));
    26072785
    2608     /* Safety precaution; looping for too long here can have a very bad effect on the host */
     2786    /*
     2787     * Safety precaution; looping for too long here can have a very bad effect on the host.
     2788     */
    26092789    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    26102790    {
     
    26142794    }
    26152795
    2616     /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
     2796    /*
     2797     * Check for IRQ inhibition due to instruction fusing (sti, mov ss).
     2798     */
    26172799    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    26182800    {
     
    26202802        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    26212803        {
    2622             /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
     2804            /*
     2805             * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
    26232806             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
    26242807             * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
     
    26512834#endif
    26522835
    2653     /* Check for pending actions that force us to go back to ring 3. */
     2836    /*
     2837     * Check for pending actions that force us to go back to ring-3.
     2838     */
    26542839    if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
    26552840        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
     
    27252910#endif
    27262911
    2727     /* When external interrupts are pending, we should exit the VM when IF is set. */
    2728     /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
     2912    /*
      2913     * When external interrupts are pending, we should exit the VM when IF is set.
     2914     * Note: *After* VM_FF_INHIBIT_INTERRUPTS check!
     2915     */
    27292916    rc = hmR0VmxCheckPendingInterrupt(pVM, pVCpu, pCtx);
    27302917    if (RT_FAILURE(rc))
     
    27332920    /** @todo check timers?? */
    27342921
    2735     /* TPR caching using CR8 is only available in 64 bits mode */
    2736     /* Note the 32 bits exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears missing in Intel CPUs */
    2737     /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!! (no longer true) */
    2738     /**
    2739      * @todo query and update the TPR only when it could have been changed (mmio access & wrmsr (x2apic))
    2740      */
     2922    /*
     2923     * TPR caching using CR8 is only available in 64-bit mode.
      2924     * Note: AMD has a 32-bit exception to this (X86_CPUID_AMD_FEATURE_ECX_CR8L), but it appears missing in Intel CPUs.
      2925     * Note: We can't do this in LoadGuestState() as PDMApicGetTPR can jump back to ring-3 (lock)!! (no longer true)
      2926     */
      2927    /** @todo query and update the TPR only when it could have been changed (mmio
      2928     *        access & wrmsr (x2apic)). */
    27412929    if (fSetupTPRCaching)
    27422930    {
     
    27492937        pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = u8LastTPR;
    27502938
    2751         /* Two options here:
     2939        /*
     2940         * Two options here:
    27522941         * - external interrupt pending, but masked by the TPR value.
    27532942         *   -> a CR8 update that lower the current TPR value should cause an exit
     
    27712960            else
    27722961            {
    2773                 /* No interrupts are pending, so we don't need to be explicitely notified.
     2962                /*
      2963                 * No interrupts are pending, so we don't need to be explicitly notified.
    27742964                 * There are enough world switches for detecting pending interrupts.
    27752965                 */
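
A standalone sketch of the threshold choice the comment above describes (hypothetical helper, not the real code; TPR values are the usual 4-bit priority classes):

    // Sketch only: pick the TPR threshold for the two cases above.
    #include <cstdint>
    #include <cstdio>

    static uint8_t ChooseTprThreshold(bool fIrqPending, uint8_t u8PendingPrio, uint8_t u8Tpr)
    {
        if (fIrqPending && u8PendingPrio <= u8Tpr)  // pending, but masked by the TPR
            return u8PendingPrio;                   // exit when the guest drops TPR below it
        return 0;                                   // otherwise world switches suffice
    }

    int main()
    {
        printf("threshold: %u\n", ChooseTprThreshold(true,  4, 8));  // masked -> 4
        printf("threshold: %u\n", ChooseTprThreshold(false, 0, 8));  // no IRQ -> 0
        return 0;
    }
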
     
    28012991
    28022992    /*
    2803      * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
     2993     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING-3!
    28042994     *       (until the actual world switch)
    28052995     */
     
    28103000    VMMR0LogFlushDisable(pVCpu);
    28113001#endif
    2812     /* Save the host state first. */
     3002
     3003    /*
     3004     * Save the host state first.
     3005     */
    28133006    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
    28143007    {
     
    28213014    }
    28223015
    2823     /* Load the guest state */
     3016    /*
     3017     * Load the guest state.
     3018     */
    28243019    if (!pVCpu->hwaccm.s.fContextUseFlags)
    28253020    {
     
    28393034
    28403035#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    2841     /* Disable interrupts to make sure a poke will interrupt execution.
     3036    /*
     3037     * Disable interrupts to make sure a poke will interrupt execution.
    28423038     * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
    28433039     */
     
    28563052    pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
    28573053
    2858     /* Manual save and restore:
     3054    /*
     3055     * Manual save and restore:
    28593056     * - General purpose registers except RIP, RSP
    28603057     *
     
    28653062     * - DR7 (reset to 0x400)
    28663063     * - EFLAGS (reset to RT_BIT(1); not relevant)
    2867      *
    28683064     */
    28693065
     
    28773073#endif
    28783074
    2879     /* Save the current TPR value in the LSTAR msr so our patches can access it. */
     3075    /*
     3076     * Save the current TPR value in the LSTAR MSR so our patches can access it.
     3077     */
    28803078    if (pVM->hwaccm.s.fTPRPatchingActive)
    28813079    {
     
    28933091    ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false);
    28943092    ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
    2895     /* Possibly the last TSC value seen by the guest (too high) (only when we're in tsc offset mode). */
     3093    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    28963094    if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
    2897         TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
     3095    {
     3096        TMCpuTickSetLastSeen(pVCpu,
      3097                             ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guesstimate of world switch overhead in clock ticks */);
     3098    }
    28983099
    28993100    TMNotifyEndOfExecution(pVCpu);
     
    29013102    Assert(!(ASMGetFlags() & X86_EFL_IF));
    29023103
    2903     /* Restore the host LSTAR msr if the guest could have changed it. */
     3104    /*
     3105     * Restore the host LSTAR MSR if the guest could have changed it.
     3106     */
    29043107    if (pVM->hwaccm.s.fTPRPatchingActive)
    29053108    {
     
    29503153    AssertRC(rc2);
    29513154
    2952     /* Sync back the guest state */
     3155    /*
     3156     * Sync back the guest state.
     3157     */
    29533158    rc2 = VMXR0SaveGuestState(pVM, pVCpu, pCtx);
    29543159    AssertRC(rc2);
     
    29613166#endif
    29623167
    2963     /* Check if an injected event was interrupted prematurely. */
     3168    /*
     3169     * Check if an injected event was interrupted prematurely.
     3170     */
    29643171    rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_INFO,            &val);
    29653172    AssertRC(rc2);
     
    29793186            AssertRC(rc2);
    29803187            pVCpu->hwaccm.s.Event.errCode  = val;
    2981             Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
     3188            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n",
     3189                 pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
    29823190        }
    29833191        else
    29843192        {
    2985             Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
     3193            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo,
     3194                 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    29863195            pVCpu->hwaccm.s.Event.errCode  = 0;
    29873196        }
    29883197    }
    29893198#ifdef VBOX_STRICT
    2990     else
    2991     if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
    2992         /* Ignore software exceptions (such as int3) as they're reoccur when we restart the instruction anyway. */
    2993         &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
    2994     {
    2995         Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
     3199    else if (   VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
      3200                /* Ignore software exceptions (such as int3) as they reoccur when we restart the instruction anyway. */
     3201             && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
     3202    {
     3203        Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n",
     3204            pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    29963205    }
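
A standalone sketch of how such an interruption-info word decomposes, following the Intel SDM layout (vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11, valid in bit 31):

    // Sketch only: decode a VM-exit interruption-information field.
    #include <cstdint>
    #include <cstdio>

    static void DecodeIntInfo(uint32_t u32IntInfo)
    {
        if (!(u32IntInfo & (1u << 31)))
        {
            printf("no event was being delivered\n");
            return;
        }
        unsigned const uVector     = u32IntInfo & 0xff;
        unsigned const uType       = (u32IntInfo >> 8) & 0x7;
        bool     const fHasErrCode = (u32IntInfo >> 11) & 1;
        printf("vector=%u type=%u errcode=%d\n", uVector, uType, fHasErrCode);
    }

    int main()
    {
        DecodeIntInfo(0x80000b0e);  // #PF (vector 14), hardware exception, with error code
        return 0;
    }
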
    29973206
     
    30063215    Log2(("IntInfo = %08x\n", (uint32_t)intInfo));
    30073216
    3008     /* Sync back the TPR if it was changed. */
     3217    /*
     3218     * Sync back the TPR if it was changed.
     3219     */
    30093220    if (    fSetupTPRCaching
    30103221        &&  u8LastTPR != pVCpu->hwaccm.s.vmx.pbVAPIC[0x80])
     
    30833294                Log(("Forward #NM fault to the guest\n"));
    30843295                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM);
    3085                 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
     3296                rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3297                                         cbInstr, 0);
    30863298                AssertRC(rc2);
    30873299                STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     
    30933305#ifdef VBOX_ALWAYS_TRAP_PF
    30943306                if (pVM->hwaccm.s.fNestedPaging)
    3095                 {   /* A genuine pagefault.
    3096                      * Forward the trap to the guest by injecting the exception and resuming execution.
     3307                {
     3308                    /*
     3309                     * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
    30973310                     */
    3098                     Log(("Guest page fault at %RGv cr2=%RGv error code %RGv rsp=%RGv\n", (RTGCPTR)pCtx->rip, exitQualification, errCode, (RTGCPTR)pCtx->rsp));
     3311                    Log(("Guest page fault at %RGv cr2=%RGv error code %RGv rsp=%RGv\n", (RTGCPTR)pCtx->rip, exitQualification,
     3312                         errCode, (RTGCPTR)pCtx->rsp));
    30993313
    31003314                    Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx));
     
    31043318                    /* Now we must update CR2. */
    31053319                    pCtx->cr2 = exitQualification;
    3106                     rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3320                    rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3321                                             cbInstr, errCode);
    31073322                    AssertRC(rc2);
    31083323
     
    31103325                    goto ResumeExecution;
    31113326                }
    3112 #endif
     3327#else
    31133328                Assert(!pVM->hwaccm.s.fNestedPaging);
     3329#endif
    31143330
    31153331#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     
    31243340                {
    31253341                    RTGCPHYS GCPhysApicBase, GCPhys;
    3126                     PDMApicGetBase(pVM, &GCPhysApicBase);   /* @todo cache this */
     3342                    PDMApicGetBase(pVM, &GCPhysApicBase);   /** @todo cache this */
    31273343                    GCPhysApicBase &= PAGE_BASE_GC_MASK;
    31283344
     
    31813397                    goto ResumeExecution;
    31823398                }
    3183                 else
    3184                 if (rc == VINF_EM_RAW_GUEST_TRAP)
    3185                 {   /* A genuine pagefault.
    3186                      * Forward the trap to the guest by injecting the exception and resuming execution.
     3399                else if (rc == VINF_EM_RAW_GUEST_TRAP)
     3400                {
     3401                    /*
     3402                     * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
    31873403                     */
    31883404                    Log2(("Forward page fault to the guest\n"));
     
    31963412                    /* Now we must update CR2. */
    31973413                    pCtx->cr2 = exitQualification;
    3198                     rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3414                    rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3415                                             cbInstr, errCode);
    31993416                    AssertRC(rc2);
    32003417
     
    32233440                }
    32243441                Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
    3225                 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3442                rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3443                                         cbInstr, errCode);
    32263444                AssertRC(rc2);
    32273445
     
    32343452                uint64_t uDR6;
    32353453
    3236                 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet.
     3454                /*
     3455                 * DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet.
    32373456                 *
    32383457                 * Exit qualification bits:
     
    32593478                        ASMSetDR6(pCtx->dr[6]);
    32603479
    3261                     /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */
     3480                    /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
    32623481                    pCtx->dr[7] &= ~X86_DR7_GD;
    32633482
     
    32713490                    AssertRC(rc2);
    32723491
    3273                     Log(("Trap %x (debug) at %RGv exit qualification %RX64 dr6=%x dr7=%x\n", vector, (RTGCPTR)pCtx->rip, exitQualification, (uint32_t)pCtx->dr[6], (uint32_t)pCtx->dr[7]));
    3274                     rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3492                    Log(("Trap %x (debug) at %RGv exit qualification %RX64 dr6=%x dr7=%x\n", vector, (RTGCPTR)pCtx->rip,
     3493                         exitQualification, (uint32_t)pCtx->dr[6], (uint32_t)pCtx->dr[7]));
     3494                    rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3495                                             cbInstr, errCode);
    32753496                    AssertRC(rc2);
    32763497
     
    32903511                {
    32913512                    Log(("Guest #BP at %04x:%RGv\n", pCtx->cs, pCtx->rip));
    3292                     rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3513                    rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3514                                             cbInstr, errCode);
    32933515                    AssertRC(rc2);
    32943516                    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     
    33043526            }
    33053527
    3306             case X86_XCPT_GP:   /* General protection failure exception.*/
     3528            case X86_XCPT_GP:   /* General protection failure exception. */
    33073529            {
    33083530                uint32_t     cbOp;
     
    33153537                {
    33163538                    Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, errCode));
    3317                     rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3539                    rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3540                                             cbInstr, errCode);
    33183541                    AssertRC(rc2);
    33193542                    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     
    33433566                        EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + pDis->opsize);
    33443567                        Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    3345                         rc2 = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
     3568                        rc2 = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE,
     3569                                           VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    33463570                        AssertRC(rc2);
    33473571                        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitSti);
     
    34153639                        }
    34163640
    3417                         rc2 = SELMToFlatEx(pVCpu, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0, &GCPtrStack);
     3641                        rc2 = SELMToFlatEx(pVCpu, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0,
     3642                                           &GCPtrStack);
    34183643                        if (RT_FAILURE(rc2))
    34193644                        {
     
    34663691                        pCtx->cs            = aIretFrame[1];
    34673692                        pCtx->csHid.u64Base = pCtx->cs << 4;
    3468                         pCtx->eflags.u      = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask)) | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
     3693                        pCtx->eflags.u      =   (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask))
     3694                                              | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
    34693695                        pCtx->sp           += sizeof(aIretFrame);
    34703696
     
    35373763                            pCtx->rip += cbOp; /* Move on to the next instruction. */
    35383764
    3539                         /* lidt, lgdt can end up here. In the future crx changes as well. Just reload the whole context to be done with it. */
     3765                        /*
     3766                         * LIDT, LGDT can end up here. In the future CRx changes as well. Just reload the
     3767                         * whole context to be done with it.
     3768                         */
    35403769                        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
    35413770
     
    35613790                switch(vector)
    35623791                {
    3563                 case X86_XCPT_DE:
    3564                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
    3565                     break;
    3566                 case X86_XCPT_UD:
    3567                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
    3568                     break;
    3569                 case X86_XCPT_SS:
    3570                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
    3571                     break;
    3572                 case X86_XCPT_NP:
    3573                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
    3574                     break;
    3575                 case X86_XCPT_XF:
    3576                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXF);
    3577                     break;
     3792                    case X86_XCPT_DE:
     3793                        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
     3794                        break;
     3795                    case X86_XCPT_UD:
     3796                        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
     3797                        break;
     3798                    case X86_XCPT_SS:
     3799                        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
     3800                        break;
     3801                    case X86_XCPT_NP:
     3802                        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
     3803                        break;
     3804                    case X86_XCPT_XF:
     3805                        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXF);
     3806                        break;
    35783807                }
    35793808
    35803809                Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
    3581                 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3810                rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3811                                         cbInstr, errCode);
    35823812                AssertRC(rc2);
    35833813
     
    35923822                {
    35933823                    Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs, pCtx->eip, errCode));
    3594                     rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
     3824                    rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     3825                                            cbInstr, errCode);
    35953826                    AssertRC(VBOXSTRICTRC_VAL(rc)); /* Strict RC check below. */
    35963827
    3597                     /* Go back to ring 3 in case of a triple fault. */
    3598                     if (    vector == X86_XCPT_DF
    3599                         &&  rc == VINF_EM_RESET)
     3828                    /* Go back to ring-3 in case of a triple fault. */
     3829                    if (   vector == X86_XCPT_DF
     3830                        && rc == VINF_EM_RESET)
     3831                    {
    36003832                        break;
     3833                    }
    36013834
    36023835                    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     
    36203853    }
    36213854
    3622     case VMX_EXIT_EPT_VIOLATION:        /* 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures. */
     3855    /*
      3856     * 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed
     3857     * by the configuration of the EPT paging structures.
     3858     */
     3859    case VMX_EXIT_EPT_VIOLATION:
    36233860    {
    36243861        RTGCPHYS GCPhys;
     
    36403877        /* If the page is present, then it's a page level protection fault. */
    36413878        if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
    3642         {
    36433879            errCode |= X86_TRAP_PF_P;
    3644         }
    36453880        else
    36463881        {
    36473882            /* Shortcut for APIC TPR reads and writes. */
    36483883            if (    (GCPhys & 0xfff) == 0x080
    3649                 &&  GCPhys > 0x1000000   /* to skip VGA frame buffer accesses */
     3884                &&  GCPhys > 0x1000000          /* to skip VGA frame buffer accesses */
    36503885                &&  fSetupTPRCaching
    36513886                &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     
    36783913            || rc == VERR_PAGE_TABLE_NOT_PRESENT
    36793914            || rc == VERR_PAGE_NOT_PRESENT)
    3680         {   /* We've successfully synced our shadow pages, so let's just continue execution. */
     3915        {
     3916            /* We've successfully synced our shadow pages, so let's just continue execution. */
    36813917            Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode));
    36823918            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF);
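
A standalone sketch of how the #PF-style error code gets assembled from the EPT-violation exit qualification above (per the Intel SDM: bit 1 = write access, and a nonzero bits 5:3 field means the translation was present):

    // Sketch only: turn an EPT-violation qualification into a #PF error code.
    #include <cstdint>
    #include <cstdio>

    static const uint32_t TRAP_PF_P  = 1u << 0;     // page-level protection fault
    static const uint32_t TRAP_PF_RW = 1u << 1;     // write access

    static uint32_t EptViolationToPfErrCode(uint64_t u64Qual)
    {
        uint32_t errCode = 0;
        if (u64Qual & (1u << 1))                    // the access was a write
            errCode |= TRAP_PF_RW;
        if (u64Qual & (7u << 3))                    // page was present (R/W/X permissions)
            errCode |= TRAP_PF_P;
        return errCode;
    }

    int main()
    {
        printf("errCode: %#x\n", EptViolationToPfErrCode(0x2a));    // write, present -> 0x3
        return 0;
    }
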
     
    37073943        /* Shortcut for APIC TPR reads and writes. */
    37083944        if (    (GCPhys & 0xfff) == 0x080
    3709             &&  GCPhys > 0x1000000   /* to skip VGA frame buffer accesses */
     3945            &&  GCPhys > 0x1000000              /* to skip VGA frame buffer accesses */
    37103946            &&  fSetupTPRCaching
    37113947            &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     
    37453981    case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
    37463982        /* Clear VM-exit on IF=1 change. */
    3747         LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
     3983        LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip,
     3984                 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
    37483985        pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
    37493986        rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     
    38694106        STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);
    38704107
    3871         /* Note: the intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */
     4108        /*
      4109         * Note: The Intel spec. claims there's a REX version of RDMSR that's slightly different,
     4110         * so we play safe by completely disassembling the instruction.
     4111         */
    38724112        Log2(("VMX: %s\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr"));
    38734113        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
     
    38754115        {
    38764116            /* EIP has been updated already. */
    3877 
    38784117            /* Only resume if successful. */
    38794118            goto ResumeExecution;
    38804119        }
    3881         AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr", VBOXSTRICTRC_VAL(rc)));
     4120        AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n",
     4121                                              (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr", VBOXSTRICTRC_VAL(rc)));
    38824122        break;
    38834123    }
     
    38894129        switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
    38904130        {
    3891         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
    3892             Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
    3893             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
    3894             rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    3895                                      VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
    3896                                      VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
    3897 
    3898             switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
     4131            case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
    38994132            {
    3900             case 0:
    3901                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
    3902                 break;
    3903             case 2:
    3904                 break;
    3905             case 3:
    3906                 Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
    3907                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
    3908                 break;
    3909             case 4:
    3910                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
    3911                 break;
    3912             case 8:
    3913                 /* CR8 contains the APIC TPR */
    3914                 Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
    3915                 break;
    3916 
    3917             default:
    3918                 AssertFailed();
     4133                Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
     4134                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
     4135                rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     4136                                         VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
     4137                                         VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
     4138                switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
     4139                {
     4140                    case 0:
     4141                        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
     4142                        break;
     4143                    case 2:
     4144                        break;
     4145                    case 3:
     4146                        Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
     4147                        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     4148                        break;
     4149                    case 4:
     4150                        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     4151                        break;
     4152                    case 8:
     4153                        /* CR8 contains the APIC TPR */
     4154                        Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1
     4155                                 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     4156                        break;
     4157
     4158                    default:
     4159                        AssertFailed();
     4160                        break;
     4161                }
    39194162                break;
    39204163            }
    3921             break;
    3922 
    3923         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
    3924             Log2(("VMX: mov x, crx\n"));
    3925             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
    3926 
    3927             Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx) || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3);
    3928 
    3929             /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
    3930             Assert(VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
    3931 
    3932             rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    3933                                     VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
    3934                                     VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
    3935             break;
    3936 
    3937         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
    3938             Log2(("VMX: clts\n"));
    3939             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS);
    3940             rc = EMInterpretCLTS(pVM, pVCpu);
    3941             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    3942             break;
    3943 
    3944         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
    3945             Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
    3946             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW);
    3947             rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
    3948             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    3949             break;
     4164
     4165            case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
     4166            {
     4167                Log2(("VMX: mov x, crx\n"));
     4168                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
     4169
     4170                Assert(   !pVM->hwaccm.s.fNestedPaging
     4171                       || !CPUMIsGuestInPagedProtectedModeEx(pCtx)
     4172                       || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3);
     4173
     4174                /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
     4175                Assert(   VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8
     4176                       || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     4177
     4178                rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     4179                                        VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
     4180                                        VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
     4181                break;
     4182            }
     4183
     4184            case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
     4185            {
     4186                Log2(("VMX: clts\n"));
     4187                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS);
     4188                rc = EMInterpretCLTS(pVM, pVCpu);
     4189                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     4190                break;
     4191            }
     4192
     4193            case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
     4194            {
     4195                Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
     4196                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW);
     4197                rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
     4198                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     4199                break;
     4200            }
    39504201        }
    39514202
     
    39704221            &&  !CPUMIsHyperDebugStateActive(pVCpu))
    39714222        {
    3972             /* Disable drx move intercepts. */
     4223            /* Disable DRx move intercepts. */
    39734224            pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    39744225            rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     
    39814232#ifdef LOG_ENABLED
    39824233            if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
    3983                 Log(("VMX_EXIT_DRX_MOVE: write DR%d genreg %d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
     4234            {
     4235                Log(("VMX_EXIT_DRX_MOVE: write DR%d genreg %d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
     4236                     VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
     4237            }
    39844238            else
    39854239                Log(("VMX_EXIT_DRX_MOVE: read DR%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification)));
     
    39974251        }
    39984252
    3999         /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first time and restore drx registers afterwards */
     4253        /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first
     4254         *        time and restore DRx registers afterwards */
    40004255        if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
    40014256        {
    4002             Log2(("VMX: mov drx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
     4257            Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
     4258                  VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
    40034259            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
    40044260            rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     
    40104266        else
    40114267        {
    4012             Log2(("VMX: mov x, drx\n"));
     4268            Log2(("VMX: mov x, DRx\n"));
    40134269            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
    40144270            rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     
    40334289    {
    40344290        STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4291        uint32_t uPort;
    40354292        uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
    4036         uint32_t uPort;
    40374293        bool     fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
    40384294
    40394295        /** @todo necessary to make the distinction? */
    40404296        if (VMX_EXIT_QUALIFICATION_IO_ENCODING(exitQualification) == VMX_EXIT_QUALIFICATION_IO_ENCODING_DX)
    4041         {
    40424297            uPort = pCtx->edx & 0xffff;
    4043         }
    40444298        else
    40454299            uPort = VMX_EXIT_QUALIFICATION_IO_PORT(exitQualification);  /* Immediate encoding. */
    40464300
    4047         /* paranoia */
    4048         if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4))
     4301        if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4))         /* paranoia */
    40494302        {
    40504303            rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ;
     
    40544307
    40554308        uint32_t cbSize = g_aIOSize[uIOWidth];
    4056 
    40574309        if (VMX_EXIT_QUALIFICATION_IO_STRING(exitQualification))
    40584310        {
     
    40844336        else
    40854337        {
    4086             /* normal in/out */
     4338            /* Normal in/out */
    40874339            uint32_t uAndVal = g_aIOOpAnd[uIOWidth];
    40884340
     
    41124364            }
    41134365        }
     4366
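
A standalone sketch of the I/O exit-qualification decode used above, following the Intel SDM layout (bits 2:0 = access size minus one, bit 3 = direction, bit 6 = operand encoding, bits 31:16 = port):

    // Sketch only: decode an I/O-instruction exit qualification.
    #include <cstdint>
    #include <cstdio>

    static const unsigned g_acbSize[4] = { 1, 2, 0, 4 };    // raw width field -> bytes

    int main()
    {
        uint64_t const u64Qual = 0x00600048;                // e.g. "in al, 60h"
        unsigned const uWidth  = u64Qual & 7;
        bool     const fIn     = (u64Qual >> 3) & 1;        // 1 = IN, 0 = OUT
        bool     const fImm    = (u64Qual >> 6) & 1;        // else the port comes from DX
        unsigned const uPort   = (u64Qual >> 16) & 0xffff;

        if (uWidth == 2 || uWidth >= 4)                     // same paranoia check as above
            printf("bogus access size field %u\n", uWidth);
        else
            printf("%s port %#x (%s), %u byte(s)\n", fIn ? "in" : "out", uPort,
                   fImm ? "immediate" : "dx", g_acbSize[uWidth]);
        return 0;
    }
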
    41144367        /*
    41154368         * Handled the I/O return codes.
     
    41264379                {
    41274380                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
    4128                     for (unsigned i=0;i<4;i++)
     4381                    for (unsigned i = 0; i < 4; i++)
    41294382                    {
    41304383                        unsigned uBPLen = g_aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)];
     
    41444397                            uDR6 |= (uint64_t)RT_BIT(i);
    41454398
    4146                             /* Note: AMD64 Architecture Programmer's Manual 13.1:
    4147                              * Bits 15:13 of the DR6 register is never cleared by the processor and must be cleared by software after
    4148                              * the contents have been read.
     4399                            /*
     4400                             * Note: AMD64 Architecture Programmer's Manual 13.1:
     4401                             * Bits 15:13 of the DR6 register are never cleared by the processor and must
     4402                             * be cleared by software after the contents have been read.
    41494403                             */
    41504404                            ASMSetDR6(uDR6);
    41514405
    4152                             /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */
     4406                            /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
    41534407                            pCtx->dr[7] &= ~X86_DR7_GD;
    41544408
     
    41684422
    41694423                            Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip));
    4170                             rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0);
     4424                            rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
     4425                                                     0 /* cbInstr */, 0 /* errCode */);
    41714426                            AssertRC(rc2);
    41724427
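The breakpoint loop above walks the four hardware breakpoints, extracting the enable, type and length fields from DR7. A sketch of those extractors (bit positions per the Intel/AMD manuals: L/G enables in bits 7:0, R/Wn at bits 17:16 + 4n, LENn at bits 19:18 + 4n; the helper names are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned Dr7IsEnabled(uint64_t uDr7, unsigned i) { return (unsigned)((uDr7 >> (2 * i)) & 3); }
    static unsigned Dr7GetRw(uint64_t uDr7, unsigned i)     { return (unsigned)((uDr7 >> (16 + 4 * i)) & 3); }
    static unsigned Dr7GetLen(uint64_t uDr7, unsigned i)    { return (unsigned)((uDr7 >> (18 + 4 * i)) & 3); }

    int main(void)
    {
        /* DR0 armed globally as a 2-byte I/O breakpoint (R/W = 2 requires CR4.DE). */
        uint64_t uDr7 = (1u << 1) | (2u << 16) | (1u << 18);
        for (unsigned i = 0; i < 4; i++)
            if (Dr7IsEnabled(uDr7, i))
                printf("DR%u: type %u, len field %u\n", i, Dr7GetRw(uDr7, i), Dr7GetLen(uDr7, i));
        return 0;
    }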
     
    41894444            Assert(fIOWrite);
    41904445        else
    4191             AssertMsg(RT_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rc)));
     4446        {
     4447            AssertMsg(   RT_FAILURE(rc)
     4448                      || rc == VINF_EM_RAW_EMULATE_INSTR
     4449                      || rc == VINF_EM_RAW_GUEST_TRAP
     4450                      || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rc)));
     4451        }
    41924452#endif
    41934453        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     
    42004460        goto ResumeExecution;
    42014461
    4202     case VMX_EXIT_APIC_ACCESS:          /* 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */
     4462    case VMX_EXIT_APIC_ACCESS:          /* 44 APIC access. Guest software attempted to access memory at a physical address
     4463                                                            on the APIC-access page. */
    42034464    {
    42044465        LogFlow(("VMX_EXIT_APIC_ACCESS\n"));
     
    42074468        switch(uAccessType)
    42084469        {
    4209         case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
    4210         case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
    4211         {
    4212             RTGCPHYS GCPhys;
    4213             PDMApicGetBase(pVM, &GCPhys);
    4214             GCPhys &= PAGE_BASE_GC_MASK;
    4215             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification);
    4216 
    4217             LogFlow(("Apic access at %RGp\n", GCPhys));
    4218             rc = IOMMMIOPhysHandler(pVM, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW, CPUMCTX2CORE(pCtx), GCPhys);
    4219             if (rc == VINF_SUCCESS)
    4220                 goto ResumeExecution;   /* rip already updated */
    4221             break;
    4222         }
    4223 
    4224         default:
    4225             rc = VINF_EM_RAW_EMULATE_INSTR;
    4226             break;
     4470            case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
     4471            case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
     4472            {
     4473                RTGCPHYS GCPhys;
     4474                PDMApicGetBase(pVM, &GCPhys);
     4475                GCPhys &= PAGE_BASE_GC_MASK;
     4476                GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification);
     4477
     4478                LogFlow(("Apic access at %RGp\n", GCPhys));
     4479                rc = IOMMMIOPhysHandler(pVM, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
     4480                                        CPUMCTX2CORE(pCtx), GCPhys);
     4481                if (rc == VINF_SUCCESS)
     4482                    goto ResumeExecution;   /* rip already updated */
     4483                break;
     4484            }
     4485
     4486            default:
     4487                rc = VINF_EM_RAW_EMULATE_INSTR;
     4488                break;
    42274489        }
    42284490        break;
     
    42404502    }
    42414503
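The physical address computed in the APIC-access case is simply the APIC base rounded down to its page plus the page offset carried in the exit qualification (bits 11:0). A standalone sketch (the MY_ constants are stand-ins for the real PAGE_* macros):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE      0x1000u
    #define MY_PAGE_BASE_MASK (~(uint64_t)(MY_PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t uApicBase = UINT64_C(0xfee00900);   /* IA32_APIC_BASE: base plus enable bits */
        uint64_t uQual     = 0x80;                   /* bits 11:0 = offset into the access page */

        uint64_t GCPhys = (uApicBase & MY_PAGE_BASE_MASK) + (uQual & (MY_PAGE_SIZE - 1));
        printf("APIC access at %#llx\n", (unsigned long long)GCPhys);   /* 0xfee00080, the TPR */
        return 0;
    }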
    4242     /* Note: the guest state isn't entirely synced back at this stage. */
     4504
     4505    /*
     4506     * Note: The guest state is not entirely synced back at this stage!
     4507     */
    42434508
    42444509    /* Investigate why there was a VM-exit. (part 2) */
     
    43694634
    43704635    case VMX_EXIT_TPR:                  /* 43 TPR below threshold. Guest software executed MOV to CR8. */
    4371     case VMX_EXIT_APIC_ACCESS:          /* 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */
    43724636    case VMX_EXIT_RDMSR:                /* 31 RDMSR. Guest software attempted to execute RDMSR. */
    43734637    case VMX_EXIT_WRMSR:                /* 32 WRMSR. Guest software attempted to execute WRMSR. */
    43744638    case VMX_EXIT_PAUSE:                /* 40 Guest software attempted to execute PAUSE. */
    43754639    case VMX_EXIT_MONITOR:              /* 39 Guest software attempted to execute MONITOR. */
    4376         /* Note: If we decide to emulate them here, then we must sync the MSRs that could have been changed (sysenter, fs/gs base)!!! */
     4640    case VMX_EXIT_APIC_ACCESS:          /* 44 APIC access. Guest software attempted to access memory at a physical address
     4641                                                        on the APIC-access page. */
     4642    {
     4643        /*
     4644         * If we decide to emulate them here, then we must sync the MSRs that could have been changed (sysenter, FS/GS base)!
     4645         */
    43774646        rc = VERR_EM_INTERPRETER;
    43784647        break;
     4648    }
    43794649
    43804650    case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
     
    44044674        Log(("VMX_VMCS_GUEST_RFLAGS     %08x\n", val2));
    44054675
    4406         VMX_LOG_SELREG(CS, "CS", val2);
    4407         VMX_LOG_SELREG(DS, "DS", val2);
    4408         VMX_LOG_SELREG(ES, "ES", val2);
    4409         VMX_LOG_SELREG(FS, "FS", val2);
    4410         VMX_LOG_SELREG(GS, "GS", val2);
    4411         VMX_LOG_SELREG(SS, "SS", val2);
    4412         VMX_LOG_SELREG(TR, "TR", val2);
     4676        VMX_LOG_SELREG(CS,   "CS",  val2);
     4677        VMX_LOG_SELREG(DS,   "DS",  val2);
     4678        VMX_LOG_SELREG(ES,   "ES",  val2);
     4679        VMX_LOG_SELREG(FS,   "FS",  val2);
     4680        VMX_LOG_SELREG(GS,   "GS",  val2);
     4681        VMX_LOG_SELREG(SS,   "SS",  val2);
     4682        VMX_LOG_SELREG(TR,   "TR",  val2);
    44134683        VMX_LOG_SELREG(LDTR, "LDTR", val2);
    44144684
     
    44304700
    44314701    }
     4702
    44324703end:
    4433 
    44344704    /* We are now going back to ring-3, so clear the action flag. */
    44354705    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    44364706
    4437     /* Signal changes for the recompiler. */
    4438     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
    4439 
    4440     /* If we executed vmlaunch/vmresume and an external irq was pending, then we don't have to do a full sync the next time. */
     4707    /*
     4708     * Signal changes for the recompiler.
     4709     */
     4710    CPUMSetChangedFlags(pVCpu,
     4711                          CPUM_CHANGED_SYSENTER_MSR
     4712                        | CPUM_CHANGED_LDTR
     4713                        | CPUM_CHANGED_GDTR
     4714                        | CPUM_CHANGED_IDTR
     4715                        | CPUM_CHANGED_TR
     4716                        | CPUM_CHANGED_HIDDEN_SEL_REGS);
     4717
     4718    /*
     4719     * If we executed vmlaunch/vmresume and an external IRQ was pending, then we don't have to do a full sync the next time.
     4720     */
    44414721    if (    exitReason == VMX_EXIT_EXTERNAL_IRQ
    44424722        &&  !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
     
    44544734    }
    44554735
    4456     /* translate into a less severe return code */
     4736    /* Translate into a less severe return code */
    44574737    if (rc == VERR_EM_INTERPRETER)
    44584738        rc = VINF_EM_RAW_EMULATE_INSTR;
    4459     else
    4460     /* Try to extract more information about what might have gone wrong here. */
    4461     if (rc == VERR_VMX_INVALID_VMCS_PTR)
    4462     {
     4739    else if (rc == VERR_VMX_INVALID_VMCS_PTR)
     4740    {
     4741        /* Try to extract more information about what might have gone wrong here. */
    44634742        VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys);
    44644743        pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS;
     
    44714750
    44724751#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    4473     /* Restore interrupts if we exitted after disabling them. */
     4752    /* Restore interrupts if we exited after disabling them. */
    44744753    if (uOldEFlags != ~(RTCCUINTREG)0)
    44754754        ASMSetFlags(uOldEFlags);
     
    44854764
    44864765/**
    4487  * Enters the VT-x session
     4766 * Enters the VT-x session.
    44884767 *
    44894768 * @returns VBox status code.
    4490  * @param   pVM         The VM to operate on.
    4491  * @param   pVCpu       The VMCPU to operate on.
    4492  * @param   pCpu        CPU info struct
     4769 * @param   pVM         Pointer to the VM.
     4770 * @param   pVCpu       Pointer to the VMCPU.
     4771 * @param   pCpu        Pointer to the CPU info struct.
    44934772 */
    44944773VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
     
    45044783    }
    45054784
    4506     /* Activate the VM Control Structure. */
     4785    /* Activate the VMCS. */
    45074786    int rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    45084787    if (RT_FAILURE(rc))
     
    45154794
    45164795/**
    4517  * Leaves the VT-x session
     4796 * Leaves the VT-x session.
    45184797 *
    45194798 * @returns VBox status code.
    4520  * @param   pVM         The VM to operate on.
    4521  * @param   pVCpu       The VMCPU to operate on.
    4522  * @param   pCtx        CPU context
     4799 * @param   pVM         Pointer to the VM.
     4800 * @param   pVCpu       Pointer to the VMCPU.
     4801 * @param   pCtx        Pointer to the guest CPU context.
    45234802 */
    45244803VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    45344813    else
    45354814#endif
    4536     /* Save the guest debug state if necessary. */
     4815
     4816    /*
     4817     * Save the guest debug state if necessary.
     4818     */
    45374819    if (CPUMIsGuestDebugStateActive(pVCpu))
    45384820    {
    45394821        CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */);
    45404822
    4541         /* Enable drx move intercepts again. */
     4823        /* Enable DRx move intercepts again. */
    45424824        pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    45434825        int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     
    45504832        Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
    45514833
    4552     /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
     4834    /*
     4835     * Clear VMCS, marking it inactive, clearing implementation-specific data and writing
     4836     * VMCS data back to memory.
     4837     */
    45534838    int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    45544839    AssertRC(rc);
     
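VMXR0Enter and VMXR0Leave bracket the canonical VMCS lifecycle: make the VMCS current on entry, VMCLEAR it on exit so its state is marked inactive and written back to memory. A sketch of that pairing with stub primitives (the Stub* functions are illustrative, not real wrappers):

    #include <stdint.h>

    static int StubActivateVmcs(uint64_t HCPhysVmcs) { (void)HCPhysVmcs; return 0; }
    static int StubClearVmcs(uint64_t HCPhysVmcs)    { (void)HCPhysVmcs; return 0; }

    static int RunSession(uint64_t HCPhysVmcs)
    {
        int rc = StubActivateVmcs(HCPhysVmcs);   /* enter: make this VMCS current */
        if (rc != 0)
            return rc;
        /* ... the vmlaunch/vmresume loop would run here ... */
        return StubClearVmcs(HCPhysVmcs);        /* leave: flush state back to memory */
    }

    int main(void)
    {
        return RunSession(UINT64_C(0x2000));
    }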
    45624847 *
    45634848 * @returns VBox status code.
    4564  * @param   pVM         The VM to operate on.
    4565  * @param   pVCpu       The VM CPU to operate on.
     4849 * @param   pVM         Pointer to the VM.
     4850 * @param   pVCpu       Pointer to the VMCPU.
    45664851 * @param   enmFlush    Type of flush.
    45674852 */
     
    45834868 *
    45844869 * @returns VBox status code.
    4585  * @param   pVM         The VM to operate on.
    4586  * @param   pVCpu       The VM CPU to operate on (can be NULL depending on @a
     4870 * @param   pVM         Pointer to the VM.
     4871 * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
    45874872 *                      enmFlush).
    45884873 * @param   enmFlush    Type of flush.
     
    45934878{
    45944879#if HC_ARCH_BITS == 32
    4595     /* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invvpid probably takes only 32 bits addresses. (@todo) */
     4880    /*
     4881     * If we get a flush in 64-bit guest mode, then force a full TLB flush. invvpid probably takes only 32-bit addresses.
     4882     */
    45964883    if (   CPUMIsGuestInLongMode(pVCpu)
    45974884        && !VMX_IS_64BIT_HOST_MODE())
     
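The fallback above degrades a per-page flush to a full TLB flush when a long-mode guest runs on a 32-bit host, since the guest address may not fit what the 32-bit invvpid path can pass. A sketch of that decision with stub flush primitives (all names here are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void StubFlushVpidAll(void)            { puts("full VPID flush"); }
    static void StubFlushVpidPage(uint32_t uAddr) { printf("flush page %#x\n", uAddr); }

    static void FlushGuestPage(bool fGuestLongMode, bool fHost64, uint64_t GCVirt)
    {
        if (fGuestLongMode && !fHost64)
            StubFlushVpidAll();                   /* address won't fit: flush everything */
        else
            StubFlushVpidPage((uint32_t)GCVirt);  /* safe: address fits in 32 bits */
    }

    int main(void)
    {
        FlushGuestPage(true, false, UINT64_C(0xffffffff81000000));
        FlushGuestPage(false, false, 0x1000);
        return 0;
    }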
    46294916 *
    46304917 * @returns VBox status code.
    4631  * @param   pVM         The VM to operate on.
    4632  * @param   pVCpu       The VM CPU to operate on.
    4633  * @param   GCVirt      Page to invalidate.
     4918 * @param   pVM         Pointer to the VM.
     4919 * @param   pVCpu       Pointer to the VMCPU.
     4920 * @param   GCVirt      Guest virtual address of the page to invalidate.
    46344921 */
    46354922VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
     
    46714958 *
    46724959 * @returns VBox status code.
    4673  * @param   pVM         The VM to operate on.
     4960 * @param   pVM         Pointer to the VM.
    46744961 * @param   pVCpu       Pointer to the VMCPU.
    4675  * @param   GCPhys      Page to invalidate.
     4962 * @param   GCPhys      Guest physical address of the page to invalidate.
    46764963 */
    46774964VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
     
    46904977
    46914978/**
    4692  * Report world switch error and dump some useful debug info
     4979 * Report world switch error and dump some useful debug info.
    46934980 *
    4694  * @param   pVM         The VM to operate on.
    4695  * @param   pVCpu       The VMCPU to operate on.
    4696  * @param   rc          Return code
    4697  * @param   pCtx        Current CPU context (not updated)
     4981 * @param   pVM         Pointer to the VM.
     4982 * @param   pVCpu       Pointer to the VMCPU.
     4983 * @param   rc          Return code.
     4984 * @param   pCtx        Pointer to the current guest CPU context (not updated).
    46984985 */
    46994986static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx)
     
    47034990    switch (VBOXSTRICTRC_VAL(rc))
    47044991    {
    4705     case VERR_VMX_INVALID_VMXON_PTR:
    4706         AssertFailed();
    4707         break;
    4708 
    4709     case VERR_VMX_UNABLE_TO_START_VM:
    4710     case VERR_VMX_UNABLE_TO_RESUME_VM:
    4711     {
    4712         int         rc2;
    4713         RTCCUINTREG exitReason, instrError;
    4714 
    4715         rc2  = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
    4716         rc2 |= VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
    4717         AssertRC(rc2);
    4718         if (rc2 == VINF_SUCCESS)
    4719         {
    4720             Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
    4721             Log(("Current stack %08x\n", &rc2));
    4722 
    4723             pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
    4724             pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;
     4992        case VERR_VMX_INVALID_VMXON_PTR:
     4993            AssertFailed();
     4994            break;
     4995
     4996        case VERR_VMX_UNABLE_TO_START_VM:
     4997        case VERR_VMX_UNABLE_TO_RESUME_VM:
     4998        {
     4999            int         rc2;
     5000            RTCCUINTREG exitReason, instrError;
     5001
     5002            rc2  = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
     5003            rc2 |= VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
     5004            AssertRC(rc2);
     5005            if (rc2 == VINF_SUCCESS)
     5006            {
     5007                Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason,
     5008                     (uint32_t)instrError));
     5009                Log(("Current stack %p\n", &rc2));
     5010
     5011                pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
     5012                pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;
    47255013
    47265014#ifdef VBOX_STRICT
    4727             RTGDTR      gdtr;
    4728             PCX86DESCHC pDesc;
    4729             RTCCUINTREG val;
    4730 
    4731             ASMGetGDTR(&gdtr);
    4732 
    4733             VMXReadVMCS(VMX_VMCS64_GUEST_RIP, &val);
    4734             Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
    4735             VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
    4736             Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS   %08x\n", val));
    4737             VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
    4738             Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS  %08x\n", val));
    4739             VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
    4740             Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS      %08x\n", val));
    4741             VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
    4742             Log(("VMX_VMCS_CTRL_EXIT_CONTROLS       %08x\n", val));
    4743 
    4744             VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
    4745             Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
    4746 
    4747             VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
    4748             Log(("VMX_VMCS_HOST_CR3 %08x\n", val));
    4749 
    4750             VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
    4751             Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
    4752 
    4753             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_CS, &val);
    4754             Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
    4755 
    4756             VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
    4757             Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
    4758 
    4759             if (val < gdtr.cbGdt)
    4760             {
    4761                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4762                 HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
    4763             }
    4764 
    4765             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_DS, &val);
    4766             Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
    4767             if (val < gdtr.cbGdt)
    4768             {
    4769                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4770                 HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
    4771             }
    4772 
    4773             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_ES, &val);
    4774             Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
    4775             if (val < gdtr.cbGdt)
    4776             {
    4777                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4778                 HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
    4779             }
    4780 
    4781             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_FS, &val);
    4782             Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val));
    4783             if (val < gdtr.cbGdt)
    4784             {
    4785                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4786                 HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
    4787             }
    4788 
    4789             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_GS, &val);
    4790             Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val));
    4791             if (val < gdtr.cbGdt)
    4792             {
    4793                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4794                 HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
    4795             }
    4796 
    4797             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_SS, &val);
    4798             Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val));
    4799             if (val < gdtr.cbGdt)
    4800             {
    4801                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4802                 HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
    4803             }
    4804 
    4805             VMXReadVMCS(VMX_VMCS16_HOST_FIELD_TR, &val);
    4806             Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val));
    4807             if (val < gdtr.cbGdt)
    4808             {
    4809                 pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    4810                 HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
    4811             }
    4812 
    4813             VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
    4814             Log(("VMX_VMCS_HOST_TR_BASE %RHv\n", val));
    4815 
    4816             VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
    4817             Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", val));
    4818             VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
    4819             Log(("VMX_VMCS_HOST_IDTR_BASE %RHv\n", val));
    4820 
    4821             VMXReadVMCS(VMX_VMCS32_HOST_SYSENTER_CS, &val);
    4822             Log(("VMX_VMCS_HOST_SYSENTER_CS  %08x\n", val));
    4823 
    4824             VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
    4825             Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n", val));
    4826 
    4827             VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
    4828             Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n", val));
    4829 
    4830             VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
    4831             Log(("VMX_VMCS_HOST_RSP %RHv\n", val));
    4832             VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
    4833             Log(("VMX_VMCS_HOST_RIP %RHv\n", val));
    4834 
     5015                RTGDTR      gdtr;
     5016                PCX86DESCHC pDesc;
     5017                RTCCUINTREG val;
     5018
     5019                ASMGetGDTR(&gdtr);
     5020
     5021                VMXReadVMCS(VMX_VMCS64_GUEST_RIP, &val);
     5022                Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
     5023                VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS,    &val);
     5024                Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS   %08x\n", val));
     5025                VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS,   &val);
     5026                Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS  %08x\n", val));
     5027                VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS,       &val);
     5028                Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS      %08x\n", val));
     5029                VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS,        &val);
     5030                Log(("VMX_VMCS_CTRL_EXIT_CONTROLS       %08x\n", val));
     5031
     5032                VMXReadVMCS(VMX_VMCS_HOST_CR0,  &val);
     5033                Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
     5034                VMXReadVMCS(VMX_VMCS_HOST_CR3,  &val);
     5035                Log(("VMX_VMCS_HOST_CR3 %08x\n", val));
     5036                VMXReadVMCS(VMX_VMCS_HOST_CR4,  &val);
     5037                Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
     5038
     5039                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_CS, &val);
     5040                Log(("VMX_VMCS_HOST_FIELD_CS %08x\n",  val));
     5041                VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS,  &val);
     5042                Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
     5043
     5044                if (val < gdtr.cbGdt)
     5045                {
     5046                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5047                    HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
     5048                }
     5049
     5050                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_DS, &val);
     5051                Log(("VMX_VMCS_HOST_FIELD_DS %08x\n",  val));
     5052                if (val < gdtr.cbGdt)
     5053                {
     5054                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5055                    HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
     5056                }
     5057
     5058                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_ES, &val);
     5059                Log(("VMX_VMCS_HOST_FIELD_ES %08x\n",  val));
     5060                if (val < gdtr.cbGdt)
     5061                {
     5062                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5063                    HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
     5064                }
     5065
     5066                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_FS, &val);
     5067                Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val));
     5068                if (val < gdtr.cbGdt)
     5069                {
     5070                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5071                    HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
     5072                }
     5073
     5074                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_GS,  &val);
     5075                Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val));
     5076                if (val < gdtr.cbGdt)
     5077                {
     5078                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5079                    HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
     5080                }
     5081
     5082                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_SS,  &val);
     5083                Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val));
     5084                if (val < gdtr.cbGdt)
     5085                {
     5086                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5087                    HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
     5088                }
     5089
     5090                VMXReadVMCS(VMX_VMCS16_HOST_FIELD_TR,  &val);
     5091                Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val));
     5092                if (val < gdtr.cbGdt)
     5093                {
     5094                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
     5095                    HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
     5096                }
     5097
     5098                VMXReadVMCS(VMX_VMCS_HOST_TR_BASE,         &val);
     5099                Log(("VMX_VMCS_HOST_TR_BASE %RHv\n",        val));
     5100                VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE,       &val);
     5101                Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n",      val));
     5102                VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE,       &val);
     5103                Log(("VMX_VMCS_HOST_IDTR_BASE %RHv\n",      val));
     5104                VMXReadVMCS(VMX_VMCS32_HOST_SYSENTER_CS,   &val);
     5105                Log(("VMX_VMCS_HOST_SYSENTER_CS  %08x\n",   val));
     5106                VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP,    &val);
     5107                Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n",   val));
     5108                VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP,    &val);
     5109                Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n",   val));
     5110                VMXReadVMCS(VMX_VMCS_HOST_RSP,             &val);
     5111                Log(("VMX_VMCS_HOST_RSP %RHv\n",            val));
     5112                VMXReadVMCS(VMX_VMCS_HOST_RIP,             &val);
     5113                Log(("VMX_VMCS_HOST_RIP %RHv\n",            val));
    48355114# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4836             if (VMX_IS_64BIT_HOST_MODE())
    4837             {
    4838                 Log(("MSR_K6_EFER       = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
    4839                 Log(("MSR_K6_STAR       = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
    4840                 Log(("MSR_K8_LSTAR      = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    4841                 Log(("MSR_K8_CSTAR      = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
    4842                 Log(("MSR_K8_SF_MASK    = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
    4843             }
     5115                if (VMX_IS_64BIT_HOST_MODE())
     5116                {
     5117                    Log(("MSR_K6_EFER       = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
     5118                    Log(("MSR_K6_STAR       = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
     5119                    Log(("MSR_K8_LSTAR      = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
     5120                    Log(("MSR_K8_CSTAR      = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
     5121                    Log(("MSR_K8_SF_MASK    = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
     5122                }
    48445123# endif
    48455124#endif /* VBOX_STRICT */
    4846         }
    4847         break;
    4848     }
    4849 
    4850     default:
    4851         /* impossible */
    4852         AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc)));
    4853         break;
     5125            }
     5126            break;
     5127        }
     5128
     5129        default:
     5130            /* impossible */
     5131            AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc)));
     5132            break;
    48545133    }
    48555134}
    48565135
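Each selector dump above guards the GDT dereference with a limit check before calling HWACCMR0DumpDescriptor(), after stripping the RPL and TI bits with X86_SEL_MASK. A sketch of that selector-to-descriptor lookup (MYDESC and MY_SEL_MASK are simplified stand-ins):

    #include <stdint.h>
    #include <stddef.h>

    #define MY_SEL_MASK 0xfff8u   /* strip RPL (bits 1:0) and TI (bit 2) from a selector */

    typedef struct { uint32_t u32Lo, u32Hi; } MYDESC;   /* simplified 8-byte descriptor */

    static const MYDESC *GdtLookup(uintptr_t uGdtBase, uint16_t cbGdtLimit, uint16_t uSel)
    {
        if (uSel >= cbGdtLimit)   /* selector offset beyond the GDT limit */
            return NULL;
        return (const MYDESC *)(uGdtBase + (uSel & MY_SEL_MASK));
    }

    int main(void)
    {
        static MYDESC s_aGdt[8];
        return GdtLookup((uintptr_t)s_aGdt, sizeof(s_aGdt) - 1, 0x10) != NULL ? 0 : 1;
    }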
     5136
    48575137#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4858 
    48595138/**
    4860  * Prepares for and executes VMLAUNCH (64 bits guest mode)
     5139 * Prepares for and executes VMLAUNCH (64-bit guest mode).
    48615140 *
    4862  * @returns VBox status code
    4863  * @param   fResume     vmlauch/vmresume
    4864  * @param   pCtx        Guest context
    4865  * @param   pCache      VMCS cache
    4866  * @param   pVM         The VM to operate on.
    4867  * @param   pVCpu       The VMCPU to operate on.
     5141 * @returns VBox status code.
     5142 * @param   fResume     Whether to vmlaunch/vmresume.
     5143 * @param   pCtx        Pointer to the guest CPU context.
     5144 * @param   pCache      Pointer to the VMCS cache.
     5145 * @param   pVM         Pointer to the VM.
     5146 * @param   pVCpu       Pointer to the VMCPU.
    48685147 */
    48695148DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
     
    49145193#ifdef DEBUG
    49155194    AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
    4916     AssertMsg(pCache->TestIn.HCPhysVMCS   == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pVCpu->hwaccm.s.vmx.HCPhysVMCS));
    4917     AssertMsg(pCache->TestIn.HCPhysVMCS   == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pCache->TestOut.HCPhysVMCS));
    4918     AssertMsg(pCache->TestIn.pCache       == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, pCache->TestOut.pCache));
    4919     AssertMsg(pCache->TestIn.pCache       == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache), ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
    4920     AssertMsg(pCache->TestIn.pCtx         == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx, pCache->TestOut.pCtx));
     5195    AssertMsg(pCache->TestIn.HCPhysVMCS   == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
     5196                                                                              pVCpu->hwaccm.s.vmx.HCPhysVMCS));
     5197    AssertMsg(pCache->TestIn.HCPhysVMCS   == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
     5198                                                                          pCache->TestOut.HCPhysVMCS));
     5199    AssertMsg(pCache->TestIn.pCache       == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
     5200                                                                      pCache->TestOut.pCache));
     5201    AssertMsg(pCache->TestIn.pCache       == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache),
     5202              ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
     5203    AssertMsg(pCache->TestIn.pCtx         == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
     5204                                                                    pCache->TestOut.pCtx));
    49215205    Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
    49225206#endif
     
    49245208}
    49255209
     5210
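The TestIn/TestOut assertions above validate that values survive the 32-bit-to-64-bit world switch unchanged. A sketch of the same round-trip idea (SWITCHPROBE and FarSideEcho are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t HCPhysVmcs; const void *pvCache; } SWITCHPROBE;

    static void FarSideEcho(const SWITCHPROBE *pIn, SWITCHPROBE *pOut)
    {
        *pOut = *pIn;   /* stands in for the far side copying the values back */
    }

    int main(void)
    {
        SWITCHPROBE In  = { UINT64_C(0x3000), &In };
        SWITCHPROBE Out = { 0, NULL };
        FarSideEcho(&In, &Out);
        assert(In.HCPhysVmcs == Out.HCPhysVmcs && In.pvCache == Out.pvCache);
        return 0;
    }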
    49265211# ifdef VBOX_STRICT
    4927 
    49285212static bool hmR0VmxIsValidReadField(uint32_t idxField)
    49295213{
    4930     switch(idxField)
    4931     {
    4932     case VMX_VMCS64_GUEST_RIP:
    4933     case VMX_VMCS64_GUEST_RSP:
    4934     case VMX_VMCS_GUEST_RFLAGS:
    4935     case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
    4936     case VMX_VMCS_CTRL_CR0_READ_SHADOW:
    4937     case VMX_VMCS64_GUEST_CR0:
    4938     case VMX_VMCS_CTRL_CR4_READ_SHADOW:
    4939     case VMX_VMCS64_GUEST_CR4:
    4940     case VMX_VMCS64_GUEST_DR7:
    4941     case VMX_VMCS32_GUEST_SYSENTER_CS:
    4942     case VMX_VMCS64_GUEST_SYSENTER_EIP:
    4943     case VMX_VMCS64_GUEST_SYSENTER_ESP:
    4944     case VMX_VMCS32_GUEST_GDTR_LIMIT:
    4945     case VMX_VMCS64_GUEST_GDTR_BASE:
    4946     case VMX_VMCS32_GUEST_IDTR_LIMIT:
    4947     case VMX_VMCS64_GUEST_IDTR_BASE:
    4948     case VMX_VMCS16_GUEST_FIELD_CS:
    4949     case VMX_VMCS32_GUEST_CS_LIMIT:
    4950     case VMX_VMCS64_GUEST_CS_BASE:
    4951     case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
    4952     case VMX_VMCS16_GUEST_FIELD_DS:
    4953     case VMX_VMCS32_GUEST_DS_LIMIT:
    4954     case VMX_VMCS64_GUEST_DS_BASE:
    4955     case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
    4956     case VMX_VMCS16_GUEST_FIELD_ES:
    4957     case VMX_VMCS32_GUEST_ES_LIMIT:
    4958     case VMX_VMCS64_GUEST_ES_BASE:
    4959     case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
    4960     case VMX_VMCS16_GUEST_FIELD_FS:
    4961     case VMX_VMCS32_GUEST_FS_LIMIT:
    4962     case VMX_VMCS64_GUEST_FS_BASE:
    4963     case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
    4964     case VMX_VMCS16_GUEST_FIELD_GS:
    4965     case VMX_VMCS32_GUEST_GS_LIMIT:
    4966     case VMX_VMCS64_GUEST_GS_BASE:
    4967     case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
    4968     case VMX_VMCS16_GUEST_FIELD_SS:
    4969     case VMX_VMCS32_GUEST_SS_LIMIT:
    4970     case VMX_VMCS64_GUEST_SS_BASE:
    4971     case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
    4972     case VMX_VMCS16_GUEST_FIELD_LDTR:
    4973     case VMX_VMCS32_GUEST_LDTR_LIMIT:
    4974     case VMX_VMCS64_GUEST_LDTR_BASE:
    4975     case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
    4976     case VMX_VMCS16_GUEST_FIELD_TR:
    4977     case VMX_VMCS32_GUEST_TR_LIMIT:
    4978     case VMX_VMCS64_GUEST_TR_BASE:
    4979     case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
    4980     case VMX_VMCS32_RO_EXIT_REASON:
    4981     case VMX_VMCS32_RO_VM_INSTR_ERROR:
    4982     case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
    4983     case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
    4984     case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
    4985     case VMX_VMCS32_RO_EXIT_INSTR_INFO:
    4986     case VMX_VMCS_RO_EXIT_QUALIFICATION:
    4987     case VMX_VMCS32_RO_IDT_INFO:
    4988     case VMX_VMCS32_RO_IDT_ERRCODE:
    4989     case VMX_VMCS64_GUEST_CR3:
    4990     case VMX_VMCS_EXIT_PHYS_ADDR_FULL:
    4991         return true;
     5214    switch (idxField)
     5215    {
     5216        case VMX_VMCS64_GUEST_RIP:
     5217        case VMX_VMCS64_GUEST_RSP:
     5218        case VMX_VMCS_GUEST_RFLAGS:
     5219        case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
     5220        case VMX_VMCS_CTRL_CR0_READ_SHADOW:
     5221        case VMX_VMCS64_GUEST_CR0:
     5222        case VMX_VMCS_CTRL_CR4_READ_SHADOW:
     5223        case VMX_VMCS64_GUEST_CR4:
     5224        case VMX_VMCS64_GUEST_DR7:
     5225        case VMX_VMCS32_GUEST_SYSENTER_CS:
     5226        case VMX_VMCS64_GUEST_SYSENTER_EIP:
     5227        case VMX_VMCS64_GUEST_SYSENTER_ESP:
     5228        case VMX_VMCS32_GUEST_GDTR_LIMIT:
     5229        case VMX_VMCS64_GUEST_GDTR_BASE:
     5230        case VMX_VMCS32_GUEST_IDTR_LIMIT:
     5231        case VMX_VMCS64_GUEST_IDTR_BASE:
     5232        case VMX_VMCS16_GUEST_FIELD_CS:
     5233        case VMX_VMCS32_GUEST_CS_LIMIT:
     5234        case VMX_VMCS64_GUEST_CS_BASE:
     5235        case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
     5236        case VMX_VMCS16_GUEST_FIELD_DS:
     5237        case VMX_VMCS32_GUEST_DS_LIMIT:
     5238        case VMX_VMCS64_GUEST_DS_BASE:
     5239        case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
     5240        case VMX_VMCS16_GUEST_FIELD_ES:
     5241        case VMX_VMCS32_GUEST_ES_LIMIT:
     5242        case VMX_VMCS64_GUEST_ES_BASE:
     5243        case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
     5244        case VMX_VMCS16_GUEST_FIELD_FS:
     5245        case VMX_VMCS32_GUEST_FS_LIMIT:
     5246        case VMX_VMCS64_GUEST_FS_BASE:
     5247        case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
     5248        case VMX_VMCS16_GUEST_FIELD_GS:
     5249        case VMX_VMCS32_GUEST_GS_LIMIT:
     5250        case VMX_VMCS64_GUEST_GS_BASE:
     5251        case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
     5252        case VMX_VMCS16_GUEST_FIELD_SS:
     5253        case VMX_VMCS32_GUEST_SS_LIMIT:
     5254        case VMX_VMCS64_GUEST_SS_BASE:
     5255        case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
     5256        case VMX_VMCS16_GUEST_FIELD_LDTR:
     5257        case VMX_VMCS32_GUEST_LDTR_LIMIT:
     5258        case VMX_VMCS64_GUEST_LDTR_BASE:
     5259        case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
     5260        case VMX_VMCS16_GUEST_FIELD_TR:
     5261        case VMX_VMCS32_GUEST_TR_LIMIT:
     5262        case VMX_VMCS64_GUEST_TR_BASE:
     5263        case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
     5264        case VMX_VMCS32_RO_EXIT_REASON:
     5265        case VMX_VMCS32_RO_VM_INSTR_ERROR:
     5266        case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
     5267        case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
     5268        case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
     5269        case VMX_VMCS32_RO_EXIT_INSTR_INFO:
     5270        case VMX_VMCS_RO_EXIT_QUALIFICATION:
     5271        case VMX_VMCS32_RO_IDT_INFO:
     5272        case VMX_VMCS32_RO_IDT_ERRCODE:
     5273        case VMX_VMCS64_GUEST_CR3:
     5274        case VMX_VMCS_EXIT_PHYS_ADDR_FULL:
     5275            return true;
    49925276    }
    49935277    return false;
    49945278}
    49955279
     5280
    49965281static bool hmR0VmxIsValidWriteField(uint32_t idxField)
    49975282{
    4998     switch(idxField)
    4999     {
    5000     case VMX_VMCS64_GUEST_LDTR_BASE:
    5001     case VMX_VMCS64_GUEST_TR_BASE:
    5002     case VMX_VMCS64_GUEST_GDTR_BASE:
    5003     case VMX_VMCS64_GUEST_IDTR_BASE:
    5004     case VMX_VMCS64_GUEST_SYSENTER_EIP:
    5005     case VMX_VMCS64_GUEST_SYSENTER_ESP:
    5006     case VMX_VMCS64_GUEST_CR0:
    5007     case VMX_VMCS64_GUEST_CR4:
    5008     case VMX_VMCS64_GUEST_CR3:
    5009     case VMX_VMCS64_GUEST_DR7:
    5010     case VMX_VMCS64_GUEST_RIP:
    5011     case VMX_VMCS64_GUEST_RSP:
    5012     case VMX_VMCS64_GUEST_CS_BASE:
    5013     case VMX_VMCS64_GUEST_DS_BASE:
    5014     case VMX_VMCS64_GUEST_ES_BASE:
    5015     case VMX_VMCS64_GUEST_FS_BASE:
    5016     case VMX_VMCS64_GUEST_GS_BASE:
    5017     case VMX_VMCS64_GUEST_SS_BASE:
    5018         return true;
     5283    switch (idxField)
     5284    {
     5285        case VMX_VMCS64_GUEST_LDTR_BASE:
     5286        case VMX_VMCS64_GUEST_TR_BASE:
     5287        case VMX_VMCS64_GUEST_GDTR_BASE:
     5288        case VMX_VMCS64_GUEST_IDTR_BASE:
     5289        case VMX_VMCS64_GUEST_SYSENTER_EIP:
     5290        case VMX_VMCS64_GUEST_SYSENTER_ESP:
     5291        case VMX_VMCS64_GUEST_CR0:
     5292        case VMX_VMCS64_GUEST_CR4:
     5293        case VMX_VMCS64_GUEST_CR3:
     5294        case VMX_VMCS64_GUEST_DR7:
     5295        case VMX_VMCS64_GUEST_RIP:
     5296        case VMX_VMCS64_GUEST_RSP:
     5297        case VMX_VMCS64_GUEST_CS_BASE:
     5298        case VMX_VMCS64_GUEST_DS_BASE:
     5299        case VMX_VMCS64_GUEST_ES_BASE:
     5300        case VMX_VMCS64_GUEST_FS_BASE:
     5301        case VMX_VMCS64_GUEST_GS_BASE:
     5302        case VMX_VMCS64_GUEST_SS_BASE:
     5303            return true;
    50195304    }
    50205305    return false;
    50215306}
    5022 
    50235307# endif /* VBOX_STRICT */
    50245308
     5309
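The validity checks above are plain switch statements, which compilers typically lower to a jump table. For comparison, a table-driven sketch using a sorted array and bsearch (the field encodings in the example are stand-ins, not real VMCS encodings):

    #include <stdint.h>
    #include <stdlib.h>

    static int CompareU32(const void *pvA, const void *pvB)
    {
        uint32_t a = *(const uint32_t *)pvA, b = *(const uint32_t *)pvB;
        return a < b ? -1 : a > b ? 1 : 0;
    }

    /* Table-driven alternative to the switch; paFields must be sorted. */
    static int IsValidField(const uint32_t *paFields, size_t cFields, uint32_t idxField)
    {
        return bsearch(&idxField, paFields, cFields, sizeof(uint32_t), CompareU32) != NULL;
    }

    int main(void)
    {
        static const uint32_t s_aFields[] = { 0x0800, 0x4802, 0x681e };
        return IsValidField(s_aFields, sizeof(s_aFields) / sizeof(s_aFields[0]), 0x681e) ? 0 : 1;
    }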
    50255310/**
    5026  * Executes the specified handler in 64 mode
     5311 * Executes the specified handler in 64-bit mode.
    50275312 *
    50285313 * @returns VBox status code.
    5029  * @param   pVM         The VM to operate on.
    5030  * @param   pVCpu       The VMCPU to operate on.
    5031  * @param   pCtx        Guest context
    5032  * @param   pfnHandler  RC handler
    5033  * @param   cbParam     Number of parameters
    5034  * @param   paParam     Array of 32 bits parameters
     5314 * @param   pVM         Pointer to the VM.
     5315 * @param   pVCpu       Pointer to the VMCPU.
     5316 * @param   pCtx        Pointer to the guest CPU context.
     5317 * @param   pfnHandler  Pointer to the RC handler function.
     5318 * @param   cbParam     Number of parameters.
     5319 * @param   paParam     Array of 32-bit parameters.
    50355320 */
    5036 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam)
     5321VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     5322                                         uint32_t *paParam)
    50375323{
    50385324    int             rc, rc2;
     
    50605346    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    50615347
    5062     /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
    /* Clear VMCS, marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    50635349    VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
    50645350
     
    50745360
    50755361    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     5362
    50765363    /* Call switcher. */
    50775364    rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
     
    50965383    return rc;
    50975384}
    5098 
    50995385#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
    51005386
     
    51025388#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    51035389/**
    5104  * Executes VMWRITE
     5390 * Executes VMWRITE.
    51055391 *
    51065392 * @returns VBox status code
    5107  * @param   pVCpu           The VMCPU to operate on.
    5108  * @param   idxField        VMCS index
    5109  * @param   u64Val          16, 32 or 64 bits value
     5393 * @param   pVCpu           Pointer to the VMCPU.
     5394 * @param   idxField        VMCS field index.
     5395 * @param   u64Val          16, 32 or 64-bit value.
    51105396 */
    51115397VMMR0DECL(int) VMXWriteVMCS64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    51125398{
    51135399    int rc;
    5114 
    51155400    switch (idxField)
    51165401    {
    5117     case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
    5118     case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
    5119     case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
    5120     case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
    5121     case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
    5122     case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
    5123     case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
    5124     case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
    5125     case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
    5126     case VMX_VMCS_GUEST_LINK_PTR_FULL:
    5127     case VMX_VMCS_GUEST_PDPTR0_FULL:
    5128     case VMX_VMCS_GUEST_PDPTR1_FULL:
    5129     case VMX_VMCS_GUEST_PDPTR2_FULL:
    5130     case VMX_VMCS_GUEST_PDPTR3_FULL:
    5131     case VMX_VMCS_GUEST_DEBUGCTL_FULL:
    5132     case VMX_VMCS_GUEST_EFER_FULL:
    5133     case VMX_VMCS_CTRL_EPTP_FULL:
    5134         /* These fields consist of two parts, which are both writable in 32 bits mode. */
    5135         rc  = VMXWriteVMCS32(idxField, u64Val);
    5136         rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
    5137         AssertRC(rc);
    5138         return rc;
    5139 
    5140     case VMX_VMCS64_GUEST_LDTR_BASE:
    5141     case VMX_VMCS64_GUEST_TR_BASE:
    5142     case VMX_VMCS64_GUEST_GDTR_BASE:
    5143     case VMX_VMCS64_GUEST_IDTR_BASE:
    5144     case VMX_VMCS64_GUEST_SYSENTER_EIP:
    5145     case VMX_VMCS64_GUEST_SYSENTER_ESP:
    5146     case VMX_VMCS64_GUEST_CR0:
    5147     case VMX_VMCS64_GUEST_CR4:
    5148     case VMX_VMCS64_GUEST_CR3:
    5149     case VMX_VMCS64_GUEST_DR7:
    5150     case VMX_VMCS64_GUEST_RIP:
    5151     case VMX_VMCS64_GUEST_RSP:
    5152     case VMX_VMCS64_GUEST_CS_BASE:
    5153     case VMX_VMCS64_GUEST_DS_BASE:
    5154     case VMX_VMCS64_GUEST_ES_BASE:
    5155     case VMX_VMCS64_GUEST_FS_BASE:
    5156     case VMX_VMCS64_GUEST_GS_BASE:
    5157     case VMX_VMCS64_GUEST_SS_BASE:
    5158         /* Queue a 64 bits value as we can't set it in 32 bits host mode. */
    5159         if (u64Val >> 32ULL)
    5160             rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
    5161         else
    5162             rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);
    5163 
    5164         return rc;
    5165 
    5166     default:
    5167         AssertMsgFailed(("Unexpected field %x\n", idxField));
    5168         return VERR_INVALID_PARAMETER;
     5402        case VMX_VMCS_CTRL_TSC_OFFSET_FULL:
     5403        case VMX_VMCS_CTRL_IO_BITMAP_A_FULL:
     5404        case VMX_VMCS_CTRL_IO_BITMAP_B_FULL:
     5405        case VMX_VMCS_CTRL_MSR_BITMAP_FULL:
     5406        case VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL:
     5407        case VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL:
     5408        case VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL:
     5409        case VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL:
     5410        case VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL:
     5411        case VMX_VMCS_GUEST_LINK_PTR_FULL:
     5412        case VMX_VMCS_GUEST_PDPTR0_FULL:
     5413        case VMX_VMCS_GUEST_PDPTR1_FULL:
     5414        case VMX_VMCS_GUEST_PDPTR2_FULL:
     5415        case VMX_VMCS_GUEST_PDPTR3_FULL:
     5416        case VMX_VMCS_GUEST_DEBUGCTL_FULL:
     5417        case VMX_VMCS_GUEST_EFER_FULL:
     5418        case VMX_VMCS_CTRL_EPTP_FULL:
     5419            /* These fields consist of two parts, which are both writable in 32-bit mode. */
     5420            rc  = VMXWriteVMCS32(idxField, u64Val);
     5421            rc |= VMXWriteVMCS32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
     5422            AssertRC(rc);
     5423            return rc;
     5424
     5425        case VMX_VMCS64_GUEST_LDTR_BASE:
     5426        case VMX_VMCS64_GUEST_TR_BASE:
     5427        case VMX_VMCS64_GUEST_GDTR_BASE:
     5428        case VMX_VMCS64_GUEST_IDTR_BASE:
     5429        case VMX_VMCS64_GUEST_SYSENTER_EIP:
     5430        case VMX_VMCS64_GUEST_SYSENTER_ESP:
     5431        case VMX_VMCS64_GUEST_CR0:
     5432        case VMX_VMCS64_GUEST_CR4:
     5433        case VMX_VMCS64_GUEST_CR3:
     5434        case VMX_VMCS64_GUEST_DR7:
     5435        case VMX_VMCS64_GUEST_RIP:
     5436        case VMX_VMCS64_GUEST_RSP:
     5437        case VMX_VMCS64_GUEST_CS_BASE:
     5438        case VMX_VMCS64_GUEST_DS_BASE:
     5439        case VMX_VMCS64_GUEST_ES_BASE:
     5440        case VMX_VMCS64_GUEST_FS_BASE:
     5441        case VMX_VMCS64_GUEST_GS_BASE:
     5442        case VMX_VMCS64_GUEST_SS_BASE:
     5443            /* Queue a 64-bit value as we can't set it in 32-bit host mode. */
     5444            if (u64Val >> 32ULL)
     5445                rc = VMXWriteCachedVMCSEx(pVCpu, idxField, u64Val);
     5446            else
     5447                rc = VMXWriteVMCS32(idxField, (uint32_t)u64Val);
     5448
     5449            return rc;
     5450
     5451        default:
     5452            AssertMsgFailed(("Unexpected field %x\n", idxField));
     5453            return VERR_INVALID_PARAMETER;
    51695454    }
    51705455}
     5456
    51715457
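VMXWriteVMCS64Ex above relies on the VMCS encoding convention that a 64-bit field's high half lives at the field encoding plus one. A standalone sketch of that split write (StubVmwrite32 stands in for the real 32-bit VMWRITE wrapper; 0x2010 is the TSC-offset field encoding per the Intel SDM):

    #include <stdint.h>
    #include <stdio.h>

    static int StubVmwrite32(uint32_t idxField, uint32_t u32Val)
    {
        printf("vmwrite %#x <- %#x\n", idxField, u32Val);
        return 0;
    }

    /* From 32-bit host code, write a 64-bit VMCS field in two halves:
       low half at the "FULL" encoding, high half at encoding + 1 ("HIGH"). */
    static int Vmwrite64From32(uint32_t idxFieldFull, uint64_t u64Val)
    {
        int rc  = StubVmwrite32(idxFieldFull,     (uint32_t)u64Val);
        rc     |= StubVmwrite32(idxFieldFull + 1, (uint32_t)(u64Val >> 32));
        return rc;
    }

    int main(void)
    {
        return Vmwrite64From32(0x2010 /* TSC offset, full */, UINT64_C(0x1122334455667788));
    }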
    51725458/**
    51735459 * Cache VMCS writes for performance reasons (Darwin) and for running 64-bit guests on 32-bit hosts.
    51745460 *
    5175  * @param   pVCpu       The VMCPU to operate on.
    5176  * @param   idxField    VMCS field
    5177  * @param   u64Val      Value
     5461 * @param   pVCpu       Pointer to the VMCPU.
     5462 * @param   idxField    VMCS field index.
     5463 * @param   u64Val      16, 32 or 64-bit value.
    51785464 */
    51795465VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
     
    51815467    PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
    51825468
    5183     AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1, ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
     5469    AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
     5470                    ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
    51845471
    51855472    /* Make sure there are no duplicates. */
    5186     for (unsigned i=0;i<pCache->Write.cValidEntries;i++)
     5473    for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
    51875474    {
    51885475        if (pCache->Write.aField[i] == idxField)