Changeset 45653 in vbox for trunk/src/VBox/VMM/VMMR0

- Timestamp: Apr 19, 2013 10:46:22 PM (12 years ago)
- svn:sync-xref-src-repo-rev: 85167
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
Legend: unmodified lines are shown as plain context; added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r45651 → r45653

  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
     {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;   /* Bits set here must be set in the VMCS. */
         uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;      /* Bits cleared here must be cleared in the VMCS. */
…

         /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
-        if (CPUMIsGuestInLongModeEx(pCtx))
+        if (CPUMIsGuestInLongModeEx(pMixedCtx))
             val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
         else
…
  * @remarks requires EFER.
  */
-DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
     {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;    /* Bits set here must be set in the VMCS. */
         uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- */
-DECLINLINE(int) hmR0VmxLoadGuestApicState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ */
+DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
…
          *    the interrupt when we VM-exit for other reasons.
          */
-        pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr;    /* Offset 0x80 is TPR in the APIC MMIO range. */
+        pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr;   /* Offset 0x80 is TPR in the APIC MMIO range. */
         /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
         uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
-        Assert(!(u32TprThreshold & 0xfffffff0));          /* Bits 31:4 MBZ. */
+        Assert(!(u32TprThreshold & 0xfffffff0));         /* Bits 31:4 MBZ. */

         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
…

         /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
-        if (pVM->hm.s.fTPRPatchingActive)
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
         {
-            Assert(!CPUMIsGuestInLongModeEx(pCtx));      /* EFER always up-to-date. */
-            pCtx->msrLSTAR = u8GuestTpr;
+            Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */
+            pMixedCtx->msrLSTAR = u8GuestTpr;
             if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
             {
…
  *
  * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
+    {
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
+        AssertRCReturn(rc, rc);
+        Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
+    }
+    return rc;
+}
+
+
+/**
+ * Loads the guest's RSP into the guest-state area in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
+    {
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
+        AssertRCReturn(rc, rc);
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
+    }
+    return rc;
+}
+
+
+/**
+ * Loads the guest's RFLAGS into the guest-state area in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
+    {
+        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
+           Let us assert it as such and use native-width VMWRITE. */
+        X86RFLAGS uRFlags = pMixedCtx->rflags;
+        Assert(uRFlags.u64 >> 32 == 0);
+        uRFlags.u64 &= VMX_EFLAGS_RESERVED_0;    /* Bits 22-31, 15, 5 & 3 MBZ. */
+        uRFlags.u64 |= VMX_EFLAGS_RESERVED_1;    /* Bit 1 MB1. */
+
+        /*
+         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
+         * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
+         */
+        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+        {
+            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
+            Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
+            pVCpu->hm.s.vmx.RealMode.eflags.u32 = uRFlags.u64;  /* Save the original eflags of the real-mode guest. */
+            uRFlags.Bits.u1VM   = 1;                            /* Set the Virtual 8086 mode bit. */
+            uRFlags.Bits.u2IOPL = 0;                            /* Change IOPL to 0, otherwise certain instructions won't fault. */
+        }
+
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RFLAGS, uRFlags.u64);
+        AssertRCReturn(rc, rc);
+
+        Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
+    }
+    return rc;
+}
+
+
+/**
+ * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
+    rc    |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
+    rc    |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
+    return rc;
+}
+
+
+/**
+ * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
+ * in the VMCS.
+ *
+ * @returns VBox status code.
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestRip(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
-    {
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
-        AssertRCReturn(rc, rc);
-        Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
-        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
-    }
-    return rc;
-}
-
-
-/**
- * Loads the guest's RSP into the guest-state area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestRsp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
-    {
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
-        AssertRCReturn(rc, rc);
-        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
-    }
-    return rc;
-}
-
-
-/**
- * Loads the guest's RFLAGS into the guest-state area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestRflags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VINF_SUCCESS;
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
-    {
-        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
-           Let us assert it as such and use native-width VMWRITE. */
-        X86RFLAGS uRFlags = pCtx->rflags;
-        Assert(uRFlags.u64 >> 32 == 0);
-        uRFlags.u64 &= VMX_EFLAGS_RESERVED_0;    /* Bits 22-31, 15, 5 & 3 MBZ. */
-        uRFlags.u64 |= VMX_EFLAGS_RESERVED_1;    /* Bit 1 MB1. */
-
-        /*
-         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
-         * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
-         */
-        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        {
-            Assert(pVM->hm.s.vmx.pRealModeTSS);
-            Assert(PDMVmmDevHeapIsEnabled(pVM));
-            pVCpu->hm.s.vmx.RealMode.eflags.u32 = uRFlags.u64;  /* Save the original eflags of the real-mode guest. */
-            uRFlags.Bits.u1VM   = 1;                            /* Set the Virtual 8086 mode bit. */
-            uRFlags.Bits.u2IOPL = 0;                            /* Change IOPL to 0, otherwise certain instructions won't fault. */
-        }
-
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RFLAGS, uRFlags.u64);
-        AssertRCReturn(rc, rc);
-
-        Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
-        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
-    }
-    return rc;
-}
-
-
-/**
- * Loads the guest's general purpose registers (GPRs) - RIP, RSP and RFLAGS
- * into the guest-state area in the VMCS. The remaining GPRs are handled in the
- * assembly code.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestGprs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    LogFlowFunc(("pVM=%p pVCpu=%p pCtx=%p\n", pVM, pVCpu, pCtx));
-    int rc = hmR0VmxLoadGuestRip(pVM, pVCpu, pCtx);
-    rc    |= hmR0VmxLoadGuestRsp(pVM, pVCpu, pCtx);
-    rc    |= hmR0VmxLoadGuestRflags(pVM, pVCpu, pCtx);
-    return rc;
-}
-
-
-/**
- * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
- * in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0VmxLoadGuestControlRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VINF_SUCCESS;
+DECLINLINE(int) hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    int rc = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);

     /*
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestDebugRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
…
     if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
     {
-        Assert((pCtx->dr[7] & 0xffffffff00000000ULL) == 0);       /* upper 32 bits are reserved (MBZ). */
+        Assert((pMixedCtx->dr[7] & 0xffffffff00000000ULL) == 0);  /* upper 32 bits are reserved (MBZ). */
         /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
-        Assert((pCtx->dr[7] & 0xd800) == 0);                      /* bits 15, 14, 12, 11 are reserved (MBZ). */
-        Assert((pCtx->dr[7] & 0x400) == 0x400);                   /* bit 10 is reserved (MB1). */
+        Assert((pMixedCtx->dr[7] & 0xd800) == 0);                 /* bits 15, 14, 12, 11 are reserved (MBZ). */
+        Assert((pMixedCtx->dr[7] & 0x400) == 0x400);              /* bit 10 is reserved (MB1). */
     }
 #endif

     int rc = VERR_INTERNAL_ERROR_5;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     bool fInterceptDB = false;
     bool fInterceptMovDRx = false;
…
         if (!CPUMIsHyperDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
             AssertRC(rc);
         }
…
         fInterceptMovDRx = true;
     }
-    else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
+    else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
     {
         if (!CPUMIsGuestDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
             AssertRC(rc);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
…

     /* The guest's view of its DR7 is unblemished. */
-    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
+    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);

     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   idxSel      Index of the selector in the VMCS.
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxWriteSegmentReg(PVM pVM, PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
+DECLINLINE(int) hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
                                        uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
 {
…
         /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
         u32Access = 0xf3;
-        Assert(pVM->hm.s.vmx.pRealModeTSS);
-        Assert(PDMVmmDevHeapIsEnabled(pVM));
+        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
+        Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
     }
     else
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCPU       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VERR_INTERNAL_ERROR_5;
+DECLINLINE(int) hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VERR_INTERNAL_ERROR_5;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);

…
     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     {
-        pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pCtx->cs.Attr.u;
-        pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pCtx->ss.Attr.u;
-        pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pCtx->ds.Attr.u;
-        pVCpu->hm.s.vmx.RealMode.uAttrES.u = pCtx->es.Attr.u;
-        pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pCtx->fs.Attr.u;
-        pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pCtx->gs.Attr.u;
+        pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
+        pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
+        pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
+        pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
+        pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
+        pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
     }
…
     }
 #endif
-    rc  = hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
-                                 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pCtx->cs, pCtx);
-    rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
-                                 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pCtx->ss, pCtx);
-    rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
-                                 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pCtx->ds, pCtx);
-    rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
-                                 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pCtx->es, pCtx);
-    rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
-                                 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pCtx->fs, pCtx);
-    rc |= hmR0VmxWriteSegmentReg(pVM, pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
-                                 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pCtx->gs, pCtx);
+    rc  = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
+                                 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
+    rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
+                                 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
+    rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
+                                 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
+    rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
+                                 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
+    rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
+                                 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
+    rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
+                                 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
     AssertRCReturn(rc, rc);

 #ifdef VBOX_STRICT
-    hmR0VmxValidateSegmentRegs(pVM, pVCpu, pCtx);
+    hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
 #endif
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
…
     if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     {
-        u16Sel          = pCtx->tr.Sel;
-        u32Limit        = pCtx->tr.u32Limit;
-        u64Base         = pCtx->tr.u64Base;
-        u32AccessRights = pCtx->tr.Attr.u;
+        u16Sel          = pMixedCtx->tr.Sel;
+        u32Limit        = pMixedCtx->tr.u32Limit;
+        u64Base         = pMixedCtx->tr.u64Base;
+        u32AccessRights = pMixedCtx->tr.Attr.u;
     }
     else
…
               || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
     AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
-    Assert(!(u32AccessRights & RT_BIT(4)));            /* System MBZ.*/
-    Assert(u32AccessRights & RT_BIT(7));               /* Present MB1.*/
-    Assert(!(u32AccessRights & 0xf00));                /* 11:8 MBZ. */
-    Assert(!(u32AccessRights & 0xfffe0000));           /* 31:17 MBZ. */
+    Assert(!(u32AccessRights & RT_BIT(4)));                 /* System MBZ.*/
+    Assert(u32AccessRights & RT_BIT(7));                    /* Present MB1.*/
+    Assert(!(u32AccessRights & 0xf00));                     /* 11:8 MBZ. */
+    Assert(!(u32AccessRights & 0xfffe0000));                /* 31:17 MBZ. */
     Assert(   (u32Limit & 0xfff) == 0xfff
-           || !(u32AccessRights & RT_BIT(15)));        /* Granularity MBZ. */
-    Assert(   !(pCtx->tr.u32Limit & 0xfff00000)
-           || (u32AccessRights & RT_BIT(15)));         /* Granularity MB1. */
+           || !(u32AccessRights & RT_BIT(15)));             /* Granularity MBZ. */
+    Assert(   !(pMixedCtx->tr.u32Limit & 0xfff00000)
+           || (u32AccessRights & RT_BIT(15)));              /* Granularity MB1. */

     rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
     {
-        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
         AssertRCReturn(rc, rc);

-        Assert(!(pCtx->gdtr.cbGdt & 0xffff0000ULL));      /* Bits 31:16 MBZ. */
-        Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pCtx->gdtr.pGdt));
+        Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000ULL)); /* Bits 31:16 MBZ. */
+        Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     }
…
         /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
         uint32_t u32Access = 0;
-        if (!pCtx->ldtr.Attr.u)
+        if (!pMixedCtx->ldtr.Attr.u)
             u32Access = HMVMX_SEL_UNUSABLE;
         else
-            u32Access = pCtx->ldtr.Attr.u;
-
-        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pCtx->ldtr.Sel);
-        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base);
+            u32Access = pMixedCtx->ldtr.Attr.u;
+
+        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
+        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
         rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
         AssertRCReturn(rc, rc);
…
         if (!(u32Access & HMVMX_SEL_UNUSABLE))
         {
-            Assert(!(pCtx->ldtr.Sel & RT_BIT(2)));        /* TI MBZ. */
-            Assert(pCtx->ldtr.Attr.n.u4Type == 2);        /* Type MB2 (LDT). */
-            Assert(!pCtx->ldtr.Attr.n.u1DescType);        /* System MBZ. */
-            Assert(pCtx->ldtr.Attr.n.u1Present == 1);     /* Present MB1. */
-            Assert(!pCtx->ldtr.Attr.n.u4LimitHigh);       /* 11:8 MBZ. */
-            Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000));    /* 31:17 MBZ. */
-            Assert(   (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
-                   || !pCtx->ldtr.Attr.n.u1Granularity);  /* Granularity MBZ. */
-            Assert(   !(pCtx->ldtr.u32Limit & 0xfff00000)
-                   || pCtx->ldtr.Attr.n.u1Granularity);   /* Granularity MB1. */
-        }
-
-        Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pCtx->ldtr.u64Base));
+            Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2)));        /* TI MBZ. */
+            Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2);        /* Type MB2 (LDT). */
+            Assert(!pMixedCtx->ldtr.Attr.n.u1DescType);        /* System MBZ. */
+            Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1);     /* Present MB1. */
+            Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh);       /* 11:8 MBZ. */
+            Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000));    /* 31:17 MBZ. */
+            Assert(   (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
+                   || !pMixedCtx->ldtr.Attr.n.u1Granularity);  /* Granularity MBZ. */
+            Assert(   !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
+                   || pMixedCtx->ldtr.Attr.n.u1Granularity);   /* Granularity MB1. */
+        }
+
+        Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
     }
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
     {
-        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
         AssertRCReturn(rc, rc);

-        Assert(!(pCtx->idtr.cbIdt & 0xffff0000ULL));      /* Bits 31:16 MBZ. */
-        Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pCtx->idtr.pIdt));
+        Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000ULL)); /* Bits 31:16 MBZ. */
+        Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     }
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     AssertPtr(pVCpu);
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         PVMXMSR  pGuestMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
         uint32_t cGuestMsrs = 0;
…
             pGuestMsr->u32IndexMSR = MSR_K6_EFER;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pCtx->msrEFER;
+            pGuestMsr->u64Value    = pMixedCtx->msrEFER;
             /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
-            if (!CPUMIsGuestInLongModeEx(pCtx))
+            if (!CPUMIsGuestInLongModeEx(pMixedCtx))
                 pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
             pGuestMsr++; cGuestMsrs++;
…
             pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pCtx->msrLSTAR;           /* 64 bits mode syscall rip */
+            pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;      /* 64 bits mode syscall rip */
             pGuestMsr++; cGuestMsrs++;
             pGuestMsr->u32IndexMSR = MSR_K6_STAR;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
+            pGuestMsr->u64Value    = pMixedCtx->msrSTAR;       /* legacy syscall eip, cs & ss */
             pGuestMsr++; cGuestMsrs++;
             pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pCtx->msrSFMASK;          /* syscall flag mask */
+            pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;     /* syscall flag mask */
             pGuestMsr++; cGuestMsrs++;
             /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
…
             pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pCtx->msrKERNELGSBASE;    /* swapgs exchange value */
+            pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;  /* swapgs exchange value */
             pGuestMsr++; cGuestMsrs++;
 #endif
…
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
     {
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
+        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
     }
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
     {
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
     }
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
     {
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
     }
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxLoadGuestActivityState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /** @todo See if we can make use of other states, e.g.
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSetupVMRunHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    if (CPUMIsGuestInLongModeEx(pCtx))
+DECLINLINE(int) hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    if (CPUMIsGuestInLongModeEx(pMixedCtx))
     {
 #ifndef VBOX_ENABLE_64_BITS_GUESTS
         return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
 #endif
-        Assert(pVM->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
+        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
…
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    int rc = VERR_INTERNAL_ERROR_5;
+static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VERR_INTERNAL_ERROR_5;
     bool fOffsettedTsc = false;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
…
     /* Cannot inject an NMI when block-by-MOV SS is in effect. */
     Assert(   uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
-           || !((*puIntrState) & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
+           || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));

     /* We require CR0 to check if the guest is in real-mode. */
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     AssertPtr(pVM);
     AssertPtr(pVCpu);
-    AssertPtr(pCtx);
+    AssertPtr(pMixedCtx);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
     pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
     if (   !pVM->hm.s.vmx.fUnrestrictedGuest
-        && CPUMIsGuestInRealModeEx(pCtx))
+        && CPUMIsGuestInRealModeEx(pMixedCtx))
     {
         pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
     }

-    int rc = hmR0VmxLoadGuestEntryCtls(pVM, pVCpu, pCtx);
+    int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestExitCtls(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestActivityState(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestControlRegs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestSegmentRegs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestDebugRegs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestMsrs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestApicState(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestGprs(pVM, pVCpu, pCtx);
+    rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxSetupVMRunHandler(pVM, pVCpu, pCtx);
+    rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
…
     if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
     {
-        rc = hmR0VmxLoadGuestRip(pVM, pVCpu, pMixedCtx);
+        rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
     }
…
     if (pVmxTransient->fUpdateTscOffsettingAndPreemptTimer)
     {
-        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu, pMixedCtx);
+        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
     }
…
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    Assert(ASMIntAreEnabled());
     return VINF_SUCCESS;
+#else
+    return VINF_EM_RAW_INTERRUPT;
+#endif
 }
…

     int rc = VERR_INTERNAL_ERROR_5;
-
     if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     {
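The recurring change across the hunks above is mechanical: each hmR0VmxLoadGuest* helper drops its PVM parameter and recovers the VM pointer from the VMCPU via CTX_SUFF only on the paths that actually need it, while the context argument is renamed pMixedCtx to flag that it may be partially out-of-sync and is only trusted for fields whose HM_CHANGED_* dirty bit is set. The following is a minimal standalone C sketch of that shape; the types, the flag name, and the vmcsWriteRip() stub are simplified stand-ins for illustration, not VirtualBox's actual definitions.

#include <stdint.h>

#define CHANGED_GUEST_RIP 0x1u   /* stand-in for HM_CHANGED_GUEST_RIP */

typedef struct VM
{
    int fSomeFeature;            /* stand-in for VM-wide settings such as hm.s.vmx.* */
} VM;

typedef struct VMCPU
{
    VM      *pVM;                /* back-pointer; replaces passing PVM explicitly */
    uint32_t fContextUseFlags;   /* dirty bits for guest state */
} VMCPU;

typedef struct GUESTCTX
{
    uint64_t rip;                /* only guaranteed current while its dirty bit is set */
} GUESTCTX;

/* Stub standing in for a VMWRITE to the VMCS. */
static int vmcsWriteRip(uint64_t rip)
{
    (void)rip;
    return 0;
}

/* pMixedCtx may be partially out-of-sync: only fields whose dirty bit is set
   are read, and the VM pointer is derived only inside the path that needs it,
   mirroring "PVM pVM = pVCpu->CTX_SUFF(pVM);" in the changeset. */
static int loadGuestRip(VMCPU *pVCpu, GUESTCTX *pMixedCtx)
{
    int rc = 0;
    if (pVCpu->fContextUseFlags & CHANGED_GUEST_RIP)
    {
        VM *pVM = pVCpu->pVM;                 /* fetched on demand, no extra parameter */
        if (pVM->fSomeFeature)
        {
            /* VM-wide settings remain reachable without the caller passing them in. */
        }
        rc = vmcsWriteRip(pMixedCtx->rip);
        if (rc == 0)
            pVCpu->fContextUseFlags &= ~CHANGED_GUEST_RIP;  /* mark the field clean */
    }
    return rc;
}

int main(void)
{
    VM       vm   = { 1 };
    VMCPU    vcpu = { &vm, CHANGED_GUEST_RIP };
    GUESTCTX ctx  = { 0x1000 };
    return loadGuestRip(&vcpu, &ctx);
}

The payoff is visible in VMXR0LoadGuestState above: every call site sheds an argument, and helpers reached from per-CPU paths no longer depend on the caller having the VM pointer at hand.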
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r45531 → r45653

     }

-    Log2(("\n E"));
+    Log2(("\n"));

     /* This is not ideal, but if we don't clear the event injection in the VMCS right here,
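One behavioral (not merely mechanical) change in HMVMXR0.cpp above is in the external-interrupt exit handler: when VBOX_WITH_VMMR0_DISABLE_PREEMPTION is defined, interrupts were left enabled while in ring-0, so the handler asserts that and simply resumes the guest; otherwise it now returns VINF_EM_RAW_INTERRUPT so the pending host interrupt gets serviced. The sketch below shows a compile-time switch of that shape; the macro name, status values, and intsEnabled() helper are hypothetical stand-ins, and the comments reflect the apparent intent rather than a definitive reading of the VirtualBox sources.

#include <assert.h>
#include <stdio.h>

#define VINF_SUCCESS          0   /* stand-in: resume the guest */
#define VINF_EM_RAW_INTERRUPT 1   /* stand-in: leave ring-0 to service the interrupt */

/* Stand-in for ASMIntAreEnabled(). */
static int intsEnabled(void)
{
    return 1;
}

static int exitExternalInterrupt(void)
{
#ifdef WITH_DISABLE_PREEMPTION   /* stand-in for VBOX_WITH_VMMR0_DISABLE_PREEMPTION */
    /* Interrupts stayed enabled around guest execution, so the host has
       already serviced this interrupt; go straight back to the guest. */
    assert(intsEnabled());
    return VINF_SUCCESS;
#else
    /* Interrupts are masked here; tell the caller to leave ring-0 so the
       host can service the pending interrupt before resuming the guest. */
    return VINF_EM_RAW_INTERRUPT;
#endif
}

int main(void)
{
    printf("exit handler returned %d\n", exitExternalInterrupt());
    return 0;
}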