VirtualBox

Changeset 45653 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp:
Apr 19, 2013 10:46:22 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
85167
Message:

VMM/VMMR0: HM cleanup.
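
In practice the cleanup applies one mechanical pattern throughout HMVMXR0.cpp: the hmR0Vmx* load routines drop their PVM parameter and fetch the VM from the VMCPU via pVCpu->CTX_SUFF(pVM) on the few paths that still need it, and the guest-CPU context parameter is renamed from pCtx to pMixedCtx to warn that its contents may be out-of-sync and must be refreshed before use. A minimal sketch of the before/after shape, using simplified stand-in types rather than the real VBox definitions:

    #include <stdio.h>

    /* Simplified stand-ins for the VBox types; hypothetical, for illustration only. */
    typedef struct VM      VM,      *PVM;
    typedef struct VMCPU   VMCPU,   *PVMCPU;
    typedef struct CPUMCTX CPUMCTX, *PCPUMCTX;

    struct VM      { int fSetting; };
    struct VMCPU   { PVM pVM; };                  /* each VCPU already knows its VM */
    struct CPUMCTX { unsigned long long rip; };

    /* Old shape: the caller had to pass the VM alongside the VCPU. */
    static int loadGuestRipOld(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        (void)pVM; (void)pVCpu;
        return (int)(pCtx->rip & 0xff);
    }

    /* New shape: one parameter fewer; the VM is derived from the VCPU (standing
       in for pVCpu->CTX_SUFF(pVM)), and the pMixedCtx name flags a context whose
       fields may be stale until explicitly updated. */
    static int loadGuestRipNew(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    {
        PVM pVM = pVCpu->pVM;
        (void)pVM;
        return (int)(pMixedCtx->rip & 0xff);
    }

    int main(void)
    {
        VM vm = { 0 };
        VMCPU vcpu = { &vm };
        CPUMCTX ctx = { 0x1234 };
        printf("old=%d new=%d\n", loadGuestRipOld(&vm, &vcpu, &ctx),
                                  loadGuestRipNew(&vcpu, &ctx));
        return 0;
    }

Dropping the redundant parameter removes one way for call sites to pass a mismatched VM/VCPU pair, at the cost of an occasional extra pointer chase where pVM is still required.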

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

Legend:

  ' ' Unmodified
  '+' Added
  '-' Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45651 r45653  
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
     {
+        PVM pVM      = pVCpu->CTX_SUFF(pVM);
         uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;            /* Bits set here must be set in the VMCS. */
         uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
…

         /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
-        if (CPUMIsGuestInLongModeEx(pCtx))
+        if (CPUMIsGuestInLongModeEx(pMixedCtx))
             val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
         else
…
  * @remarks requires EFER.
  */
-DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
     {
+        PVM pVM      = pVCpu->CTX_SUFF(pVM);
         uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;            /* Bits set here must be set in the VMCS. */
         uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- */
-DECLINLINE(int) hmR0VmxLoadGuestApicState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ */
+DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
…
              * the interrupt when we VM-exit for other reasons.
              */
-            pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr;      /* Offset 0x80 is TPR in the APIC MMIO range. */
+            pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr;       /* Offset 0x80 is TPR in the APIC MMIO range. */
             /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
             uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
-            Assert(!(u32TprThreshold & 0xfffffff0));            /* Bits 31:4 MBZ. */
+            Assert(!(u32TprThreshold & 0xfffffff0));             /* Bits 31:4 MBZ. */

             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
…

             /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
-            if (pVM->hm.s.fTPRPatchingActive)
+            if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
             {
-                Assert(!CPUMIsGuestInLongModeEx(pCtx));     /* EFER always up-to-date. */
-                pCtx->msrLSTAR = u8GuestTpr;
+                Assert(!CPUMIsGuestInLongModeEx(pMixedCtx));     /* EFER always up-to-date. */
+                pMixedCtx->msrLSTAR = u8GuestTpr;
                 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                 {
…
  *
  * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
+    {
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
+        AssertRCReturn(rc, rc);
+        Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
+    }
+    return rc;
+}
+
+
+/**
+ * Loads the guest's RSP into the guest-state area in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
+    {
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
+        AssertRCReturn(rc, rc);
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
+    }
+    return rc;
+}
+
+
+/**
+ * Loads the guest's RFLAGS into the guest-state area in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
+    {
+        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
+           Let us assert it as such and use native-width VMWRITE. */
+        X86RFLAGS uRFlags = pMixedCtx->rflags;
+        Assert(uRFlags.u64 >> 32 == 0);
+        uRFlags.u64 &= VMX_EFLAGS_RESERVED_0;                   /* Bits 22-31, 15, 5 & 3 MBZ. */
+        uRFlags.u64 |= VMX_EFLAGS_RESERVED_1;                   /* Bit 1 MB1. */
+
+        /*
+         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
+         * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
+         */
+        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+        {
+            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
+            Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
+            pVCpu->hm.s.vmx.RealMode.eflags.u32 = uRFlags.u64; /* Save the original eflags of the real-mode guest. */
+            uRFlags.Bits.u1VM   = 1;                           /* Set the Virtual 8086 mode bit. */
+            uRFlags.Bits.u2IOPL = 0;                           /* Change IOPL to 0, otherwise certain instructions won't fault. */
+        }
+
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RFLAGS, uRFlags.u64);
+        AssertRCReturn(rc, rc);
+
+        Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
+    }
+    return rc;
+}
+
+
+/**
+ * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
+    rc    |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
+    rc    |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
+    return rc;
+}
+
+
+/**
+ * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
+ * in the VMCS.
+ *
+ * @returns VBox status code.
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestRip(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
-    {
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
-        AssertRCReturn(rc, rc);
-        Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
-        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
-    }
-    return rc;
-}
-
-
-/**
- * Loads the guest's RSP into the guest-state area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestRsp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
-    {
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
-        AssertRCReturn(rc, rc);
-        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
-    }
-    return rc;
-}
-
-
-/**
- * Loads the guest's RFLAGS into the guest-state area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestRflags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
-    {
-        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
-           Let us assert it as such and use native-width VMWRITE. */
-        X86RFLAGS uRFlags = pCtx->rflags;
-        Assert(uRFlags.u64 >> 32 == 0);
-        uRFlags.u64 &= VMX_EFLAGS_RESERVED_0;                   /* Bits 22-31, 15, 5 & 3 MBZ. */
-        uRFlags.u64 |= VMX_EFLAGS_RESERVED_1;                   /* Bit 1 MB1. */
-
-        /*
-         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
-         * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
-         */
-        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        {
-            Assert(pVM->hm.s.vmx.pRealModeTSS);
-            Assert(PDMVmmDevHeapIsEnabled(pVM));
-            pVCpu->hm.s.vmx.RealMode.eflags.u32 = uRFlags.u64; /* Save the original eflags of the real-mode guest. */
-            uRFlags.Bits.u1VM   = 1;                           /* Set the Virtual 8086 mode bit. */
-            uRFlags.Bits.u2IOPL = 0;                           /* Change IOPL to 0, otherwise certain instructions won't fault. */
-        }
-
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RFLAGS, uRFlags.u64);
-        AssertRCReturn(rc, rc);
-
-        Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
-        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
-    }
-    return rc;
-}
-
-
-/**
- * Loads the guest's general purpose registers (GPRs) - RIP, RSP and RFLAGS
- * into the guest-state area in the VMCS. The remaining GPRs are handled in the
- * assembly code.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestGprs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    LogFlowFunc(("pVM=%p pVCpu=%p pCtx=%p\n", pVM, pVCpu, pCtx));
-    int rc = hmR0VmxLoadGuestRip(pVM, pVCpu, pCtx);
-    rc    |= hmR0VmxLoadGuestRsp(pVM, pVCpu, pCtx);
-    rc    |= hmR0VmxLoadGuestRflags(pVM, pVCpu, pCtx);
-    return rc;
-}
-
-
-/**
- * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
- * in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestControlRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VINF_SUCCESS;
+DECLINLINE(int) hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    int rc  = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);

     /*
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestDebugRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
…
     if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
     {
-        Assert((pCtx->dr[7] & 0xffffffff00000000ULL) == 0);  /* upper 32 bits are reserved (MBZ). */
+        Assert((pMixedCtx->dr[7] & 0xffffffff00000000ULL) == 0);  /* upper 32 bits are reserved (MBZ). */
         /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
-        Assert((pCtx->dr[7] & 0xd800) == 0);                 /* bits 15, 14, 12, 11 are reserved (MBZ). */
-        Assert((pCtx->dr[7] & 0x400) == 0x400);              /* bit 10 is reserved (MB1). */
+        Assert((pMixedCtx->dr[7] & 0xd800) == 0);                 /* bits 15, 14, 12, 11 are reserved (MBZ). */
+        Assert((pMixedCtx->dr[7] & 0x400) == 0x400);              /* bit 10 is reserved (MB1). */
     }
 #endif

     int rc                = VERR_INTERNAL_ERROR_5;
+    PVM pVM               = pVCpu->CTX_SUFF(pVM);
     bool fInterceptDB     = false;
     bool fInterceptMovDRx = false;
…
         if (!CPUMIsHyperDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
             AssertRC(rc);
         }
…
         fInterceptMovDRx = true;
     }
-    else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
+    else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
     {
         if (!CPUMIsGuestDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
             AssertRC(rc);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
…

     /* The guest's view of its DR7 is unblemished. */
-    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
+    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);

     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   idxSel      Index of the selector in the VMCS.
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxWriteSegmentReg(PVM pVM, PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
+DECLINLINE(int) hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
                                        uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
 {
…
         /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
         u32Access = 0xf3;
-        Assert(pVM->hm.s.vmx.pRealModeTSS);
-        Assert(PDMVmmDevHeapIsEnabled(pVM));
+        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
+        Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
     }
     else
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCPU       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VERR_INTERNAL_ERROR_5;
+DECLINLINE(int) hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc  = VERR_INTERNAL_ERROR_5;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);

     /*
…
         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
         {
-            pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pCtx->cs.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pCtx->ss.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pCtx->ds.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.uAttrES.u = pCtx->es.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pCtx->fs.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pCtx->gs.Attr.u;
+            pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
+            pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
+            pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
+            pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
+            pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
+            pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
         }

…
         }
 #endif
-        rc =  hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
-                                     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pCtx->cs, pCtx);
-        rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
-                                     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pCtx->ss, pCtx);
-        rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
-                                     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pCtx->ds, pCtx);
-        rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
-                                     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pCtx->es, pCtx);
-        rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
-                                     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pCtx->fs, pCtx);
-        rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
-                                     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pCtx->gs, pCtx);
+        rc =  hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
+                                     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
+        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
+                                     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
+        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
+                                     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
+        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
+                                     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
+        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
+                                     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
+        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
+                                     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
         AssertRCReturn(rc, rc);

 #ifdef VBOX_STRICT
-        hmR0VmxValidateSegmentRegs(pVM, pVCpu, pCtx);
+        hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
 #endif
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
…
         if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
         {
-            u16Sel          = pCtx->tr.Sel;
-            u32Limit        = pCtx->tr.u32Limit;
-            u64Base         = pCtx->tr.u64Base;
-            u32AccessRights = pCtx->tr.Attr.u;
+            u16Sel          = pMixedCtx->tr.Sel;
+            u32Limit        = pMixedCtx->tr.u32Limit;
+            u64Base         = pMixedCtx->tr.u64Base;
+            u32AccessRights = pMixedCtx->tr.Attr.u;
         }
         else
…
                   || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
         AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
-        Assert(!(u32AccessRights & RT_BIT(4)));           /* System MBZ.*/
-        Assert(u32AccessRights & RT_BIT(7));              /* Present MB1.*/
-        Assert(!(u32AccessRights & 0xf00));               /* 11:8 MBZ. */
-        Assert(!(u32AccessRights & 0xfffe0000));          /* 31:17 MBZ. */
+        Assert(!(u32AccessRights & RT_BIT(4)));                 /* System MBZ.*/
+        Assert(u32AccessRights & RT_BIT(7));                    /* Present MB1.*/
+        Assert(!(u32AccessRights & 0xf00));                     /* 11:8 MBZ. */
+        Assert(!(u32AccessRights & 0xfffe0000));                /* 31:17 MBZ. */
         Assert(   (u32Limit & 0xfff) == 0xfff
-               || !(u32AccessRights & RT_BIT(15)));       /* Granularity MBZ. */
-        Assert(   !(pCtx->tr.u32Limit & 0xfff00000)
-               || (u32AccessRights & RT_BIT(15)));        /* Granularity MB1. */
+               || !(u32AccessRights & RT_BIT(15)));             /* Granularity MBZ. */
+        Assert(   !(pMixedCtx->tr.u32Limit & 0xfff00000)
+               || (u32AccessRights & RT_BIT(15)));              /* Granularity MB1. */

         rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR,         u16Sel);
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
     {
-        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pCtx->gdtr.pGdt);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);
         AssertRCReturn(rc, rc);

-        Assert(!(pCtx->gdtr.cbGdt & 0xffff0000ULL));      /* Bits 31:16 MBZ. */
-        Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pCtx->gdtr.pGdt));
+        Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000ULL));      /* Bits 31:16 MBZ. */
+        Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     }
…
         /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
         uint32_t u32Access = 0;
-        if (!pCtx->ldtr.Attr.u)
+        if (!pMixedCtx->ldtr.Attr.u)
             u32Access = HMVMX_SEL_UNUSABLE;
         else
-            u32Access = pCtx->ldtr.Attr.u;
-
-        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR,         pCtx->ldtr.Sel);
-        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pCtx->ldtr.u32Limit);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          pCtx->ldtr.u64Base);
+            u32Access = pMixedCtx->ldtr.Attr.u;
+
+        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR,         pMixedCtx->ldtr.Sel);
+        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pMixedCtx->ldtr.u32Limit);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          pMixedCtx->ldtr.u64Base);
         rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
         AssertRCReturn(rc, rc);
…
         if (!(u32Access & HMVMX_SEL_UNUSABLE))
         {
-            Assert(!(pCtx->ldtr.Sel & RT_BIT(2)));              /* TI MBZ. */
-            Assert(pCtx->ldtr.Attr.n.u4Type == 2);              /* Type MB2 (LDT). */
-            Assert(!pCtx->ldtr.Attr.n.u1DescType);              /* System MBZ. */
-            Assert(pCtx->ldtr.Attr.n.u1Present == 1);           /* Present MB1. */
-            Assert(!pCtx->ldtr.Attr.n.u4LimitHigh);             /* 11:8 MBZ. */
-            Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000));          /* 31:17 MBZ. */
-            Assert(   (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
-                   || !pCtx->ldtr.Attr.n.u1Granularity);        /* Granularity MBZ. */
-            Assert(   !(pCtx->ldtr.u32Limit & 0xfff00000)
-                   || pCtx->ldtr.Attr.n.u1Granularity);         /* Granularity MB1. */
-        }
-
-        Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n",  pCtx->ldtr.u64Base));
+            Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2)));              /* TI MBZ. */
+            Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2);              /* Type MB2 (LDT). */
+            Assert(!pMixedCtx->ldtr.Attr.n.u1DescType);              /* System MBZ. */
+            Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1);           /* Present MB1. */
+            Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh);             /* 11:8 MBZ. */
+            Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000));          /* 31:17 MBZ. */
+            Assert(   (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
+                   || !pMixedCtx->ldtr.Attr.n.u1Granularity);        /* Granularity MBZ. */
+            Assert(   !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
+                   || pMixedCtx->ldtr.Attr.n.u1Granularity);         /* Granularity MB1. */
+        }
+
+        Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n",  pMixedCtx->ldtr.u64Base));
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
     }
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
     {
-        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pCtx->idtr.pIdt);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pMixedCtx->idtr.pIdt);
         AssertRCReturn(rc, rc);

-        Assert(!(pCtx->idtr.cbIdt & 0xffff0000ULL));      /* Bits 31:16 MBZ. */
-        Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pCtx->idtr.pIdt));
+        Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000ULL));      /* Bits 31:16 MBZ. */
+        Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     }
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     AssertPtr(pVCpu);
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     {
+        PVM pVM             = pVCpu->CTX_SUFF(pVM);
         PVMXMSR  pGuestMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
         uint32_t cGuestMsrs = 0;
…
             pGuestMsr->u32IndexMSR = MSR_K6_EFER;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pCtx->msrEFER;
+            pGuestMsr->u64Value    = pMixedCtx->msrEFER;
             /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
-            if (!CPUMIsGuestInLongModeEx(pCtx))
+            if (!CPUMIsGuestInLongModeEx(pMixedCtx))
                 pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
             pGuestMsr++; cGuestMsrs++;
…
                 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
                 pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pCtx->msrLSTAR;           /* 64 bits mode syscall rip */
+                pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;           /* 64 bits mode syscall rip */
                 pGuestMsr++; cGuestMsrs++;
                 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
                 pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
+                pGuestMsr->u64Value    = pMixedCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
                 pGuestMsr++; cGuestMsrs++;
                 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
                 pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pCtx->msrSFMASK;          /* syscall flag mask */
+                pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;          /* syscall flag mask */
                 pGuestMsr++; cGuestMsrs++;
                 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
…
                 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
                 pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pCtx->msrKERNELGSBASE;    /* swapgs exchange value */
+                pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;    /* swapgs exchange value */
                 pGuestMsr++; cGuestMsrs++;
 #endif
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
     {
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS,   pCtx->SysEnter.cs);
+        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS,   pMixedCtx->SysEnter.cs);
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
     }
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
     {
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
     }
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
     {
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
     }
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestActivityState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /** @todo See if we can make use of other states, e.g.
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSetupVMRunHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    if (CPUMIsGuestInLongModeEx(pCtx))
+DECLINLINE(int) hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    if (CPUMIsGuestInLongModeEx(pMixedCtx))
     {
 #ifndef VBOX_ENABLE_64_BITS_GUESTS
         return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
 #endif
-        Assert(pVM->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
+        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
…
  *
  * @returns VBox status code.
- * @param   pVM             Pointer to the VM.
  * @param   pVCpu           Pointer to the VMCPU.
  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
…
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    int rc = VERR_INTERNAL_ERROR_5;
+static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int  rc            = VERR_INTERNAL_ERROR_5;
     bool fOffsettedTsc = false;
+    PVM pVM            = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
…
     /* Cannot inject an NMI when block-by-MOV SS is in effect. */
     Assert(   uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
-           || !((*puIntrState) & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
+           || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));

     /* We require CR0 to check if the guest is in real-mode. */
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     AssertPtr(pVM);
     AssertPtr(pVCpu);
-    AssertPtr(pCtx);
+    AssertPtr(pMixedCtx);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

…
     pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
     if (   !pVM->hm.s.vmx.fUnrestrictedGuest
-        && CPUMIsGuestInRealModeEx(pCtx))
+        && CPUMIsGuestInRealModeEx(pMixedCtx))
     {
         pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
     }

-    int rc = hmR0VmxLoadGuestEntryCtls(pVM, pVCpu, pCtx);
+    int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestExitCtls(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestActivityState(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestControlRegs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestSegmentRegs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestDebugRegs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestMsrs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestApicState(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestGprs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxSetupVMRunHandler(pVM, pVCpu, pCtx);
+    rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

…
     if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
     {
-        rc = hmR0VmxLoadGuestRip(pVM, pVCpu, pMixedCtx);
+        rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
     }
…
     if (pVmxTransient->fUpdateTscOffsettingAndPreemptTimer)
     {
-        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu, pMixedCtx);
+        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
     }
…
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    Assert(ASMIntAreEnabled());
     return VINF_SUCCESS;
+#else
+    return VINF_EM_RAW_INTERRUPT;
+#endif
 }

…

     int rc = VERR_INTERNAL_ERROR_5;
-
     if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     {
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r45531 r45653  
     }

-    Log2(("\nE"));
+    Log2(("\n"));

     /* This is not ideal, but if we don't clear the event injection in the VMCS right here,