Changeset 72208 in vbox
- Timestamp:
- May 15, 2018 4:11:35 AM (7 years ago)
- svn:sync-xref-src-repo-rev:
- 122647
- Location:
- trunk/src/VBox/VMM
- Files:
- 10 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/Makefile.kmk
r71152 r72208 49 49 VMM_COMMON_DEFS += VBOX_WITH_3RD_IEM_STEP 50 50 endif 51 ifdef VBOX_WITH_NESTED_HWVIRT 52 VMM_COMMON_DEFS += VBOX_WITH_NESTED_HWVIRT 53 ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM 54 VMM_COMMON_DEFS +=VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM55 endif51 ifdef VBOX_WITH_NESTED_HWVIRT_SVM 52 VMM_COMMON_DEFS += VBOX_WITH_NESTED_HWVIRT_SVM 53 endif 54 ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM 55 VMM_COMMON_DEFS += VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM 56 56 endif 57 57 #ifdef VBOX_WITH_IEM … … 570 570 VMMRC_DEFS = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \ 571 571 $(VMM_COMMON_DEFS) 572 VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT ,$(VMMRC_DEFS))572 VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT_SVM,$(VMMRC_DEFS)) 573 573 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK 574 574 VMMRC_DEFS += VMM_R0_SWITCH_STACK -
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r71755 r72208 184 184 RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); 185 185 *puValue = TMCpuTickGet(pVCpu); 186 #ifdef VBOX_WITH_NESTED_HWVIRT 186 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 187 187 *puValue = CPUMApplyNestedGuestTscOffset(pVCpu, *puValue); 188 188 #endif … … 345 345 * what we want? */ 346 346 *puValue = TMCpuTickGet(pVCpu); 347 #ifdef VBOX_WITH_NESTED_HWVIRT 347 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 348 348 *puValue = CPUMApplyNestedGuestTscOffset(pVCpu, *puValue); 349 349 #endif … … 368 368 * what we want? */ 369 369 *puValue = TMCpuTickGet(pVCpu); 370 #ifdef VBOX_WITH_NESTED_HWVIRT 370 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 371 371 *puValue = CPUMApplyNestedGuestTscOffset(pVCpu, *puValue); 372 372 #endif … … 4934 4934 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Gim(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) 4935 4935 { 4936 #ifdef VBOX_WITH_NESTED_HWVIRT 4936 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4937 4937 /* Raise #GP(0) like a physical CPU would since the nested-hypervisor hasn't intercept these MSRs. */ 4938 4938 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest; … … 4947 4947 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Gim(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) 4948 4948 { 4949 #ifdef VBOX_WITH_NESTED_HWVIRT 4949 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4950 4950 /* Raise #GP(0) like a physical CPU would since the nested-hypervisor hasn't intercept these MSRs. */ 4951 4951 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest; -
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r71341 r72208 1316 1316 1317 1317 uint64_t uTicks = TMCpuTickGet(pVCpu); 1318 #ifdef VBOX_WITH_NESTED_HWVIRT 1318 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1319 1319 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks); 1320 1320 #endif … … 1355 1355 1356 1356 uint64_t uTicks = TMCpuTickGet(pVCpu); 1357 #ifdef VBOX_WITH_NESTED_HWVIRT 1357 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1358 1358 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks); 1359 1359 #endif -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r71529 r72208 632 632 g_HmR0.aCpuInfo[i].HCPhysMemObj = NIL_RTHCPHYS; 633 633 g_HmR0.aCpuInfo[i].pvMemObj = NULL; 634 #ifdef VBOX_WITH_NESTED_HWVIRT 634 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 635 635 g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm = NIL_RTR0MEMOBJ; 636 636 g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm = NIL_RTHCPHYS; … … 790 790 g_HmR0.aCpuInfo[i].pvMemObj = NULL; 791 791 } 792 #ifdef VBOX_WITH_NESTED_HWVIRT 792 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 793 793 if (g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm != NIL_RTR0MEMOBJ) 794 794 { … … 944 944 Assert(!g_HmR0.aCpuInfo[i].cTlbFlushes); 945 945 Assert(!g_HmR0.aCpuInfo[i].uCurrentAsid); 946 # ifdef VBOX_WITH_NESTED_HWVIRT 946 # ifdef VBOX_WITH_NESTED_HWVIRT_SVM 947 947 Assert(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ); 948 948 Assert(g_HmR0.aCpuInfo[i].n.svm.HCPhysNstGstMsrpm == NIL_RTHCPHYS); … … 978 978 { 979 979 Assert(g_HmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ); 980 #ifdef VBOX_WITH_NESTED_HWVIRT 980 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 981 981 Assert(g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm == NIL_RTR0MEMOBJ); 982 982 #endif … … 995 995 ASMMemZeroPage(g_HmR0.aCpuInfo[i].pvMemObj); 996 996 997 #ifdef VBOX_WITH_NESTED_HWVIRT 997 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 998 998 rc = RTR0MemObjAllocCont(&g_HmR0.aCpuInfo[i].n.svm.hNstGstMsrpm, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, 999 999 false /* executable R0 mapping */); -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72178 r72208 59 59 } while (0) 60 60 61 # ifdef VBOX_WITH_NESTED_HWVIRT 61 # ifdef VBOX_WITH_NESTED_HWVIRT_SVM 62 62 # define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \ 63 63 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \ … … 70 70 #else 71 71 # define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0) 72 # ifdef VBOX_WITH_NESTED_HWVIRT 72 # ifdef VBOX_WITH_NESTED_HWVIRT_SVM 73 73 # define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0) 74 74 # endif … … 83 83 * \#VMEXIT intercepts that maybe caused during delivering of another 84 84 * event in the guest. */ 85 #ifdef VBOX_WITH_NESTED_HWVIRT 85 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 86 86 # define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \ 87 87 do \ … … 139 139 140 140 /** Assert that we're not executing a nested-guest. */ 141 #ifdef VBOX_WITH_NESTED_HWVIRT 141 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 142 142 # define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx) Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx))) 143 143 #else … … 146 146 147 147 /** Assert that we're executing a nested-guest. 
*/ 148 #ifdef VBOX_WITH_NESTED_HWVIRT 148 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 149 149 # define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx))) 150 150 #else … … 370 370 static FNSVMEXITHANDLER hmR0SvmExitXcptAC; 371 371 static FNSVMEXITHANDLER hmR0SvmExitXcptBP; 372 #if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT )372 #if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM) 373 373 static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric; 374 374 #endif 375 #ifdef VBOX_WITH_NESTED_HWVIRT 375 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 376 376 static FNSVMEXITHANDLER hmR0SvmExitXcptPFNested; 377 377 static FNSVMEXITHANDLER hmR0SvmExitClgi; … … 387 387 388 388 static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient); 389 #ifdef VBOX_WITH_NESTED_HWVIRT 389 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 390 390 static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient); 391 391 #endif … … 757 757 { 758 758 PVM pVM = pVCpu->CTX_SUFF(pVM); 759 #ifdef VBOX_WITH_NESTED_HWVIRT 759 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 760 760 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 761 761 { … … 780 780 { 781 781 PVM pVM = pVCpu->CTX_SUFF(pVM); 782 #ifdef VBOX_WITH_NESTED_HWVIRT 782 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 783 783 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 784 784 { … … 803 803 { 804 804 PVM pVM = pVCpu->CTX_SUFF(pVM); 805 #ifdef VBOX_WITH_NESTED_HWVIRT 805 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 806 806 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 807 807 { … … 847 847 if (!fInNestedGuestMode) 848 848 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit); 849 #ifdef VBOX_WITH_NESTED_HWVIRT 849 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 850 850 else 851 851 { … … 867 867 if (!fInNestedGuestMode) 868 868 *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1); 869 #ifdef VBOX_WITH_NESTED_HWVIRT 869 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 870 870 else 871 871 { … … 903 903 bool const fUseLbrVirt = fLbrVirt; 
/** @todo CFGM, IEM implementation etc. */ 904 904 905 #ifdef VBOX_WITH_NESTED_HWVIRT 905 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 906 906 bool const fVirtVmsaveVmload = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD); 907 907 bool const fUseVirtVmsaveVmload = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && pVM->hm.s.fNestedPaging; … … 953 953 #endif 954 954 955 #ifdef VBOX_WITH_NESTED_HWVIRT 955 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 956 956 /* Virtualized VMSAVE/VMLOAD. */ 957 957 pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = fUseVirtVmsaveVmload; … … 1087 1087 DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu, PCPUMCTX pCtx) 1088 1088 { 1089 #ifdef VBOX_WITH_NESTED_HWVIRT 1089 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1090 1090 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 1091 1091 return pCtx->hwvirt.svm.CTX_SUFF(pVmcb); … … 1106 1106 DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu, PCPUMCTX pCtx) 1107 1107 { 1108 #ifdef VBOX_WITH_NESTED_HWVIRT 1108 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1109 1109 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx); 1110 1110 return &pVCpu->hm.s.svm.NstGstVmcbCache; … … 1165 1165 static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu) 1166 1166 { 1167 #ifndef VBOX_WITH_NESTED_HWVIRT 1167 #ifndef VBOX_WITH_NESTED_HWVIRT_SVM 1168 1168 RT_NOREF(pCtx); 1169 1169 #endif … … 1184 1184 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu 1185 1185 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes 1186 #ifdef VBOX_WITH_NESTED_HWVIRT 1186 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1187 1187 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx) 1188 1188 #endif … … 1411 1411 { 1412 1412 bool fRemove = true; 1413 #ifdef VBOX_WITH_NESTED_HWVIRT 1413 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1414 1414 /* Only remove the intercept if the nested-guest is also not intercepting it! 
*/ 1415 1415 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) … … 1467 1467 { 1468 1468 bool fRemove = true; 1469 #ifdef VBOX_WITH_NESTED_HWVIRT 1469 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1470 1470 /* Only remove the control intercept if the nested-guest is also not intercepting it! */ 1471 1471 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) … … 1989 1989 1990 1990 1991 #ifdef VBOX_WITH_NESTED_HWVIRT 1991 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1992 1992 /** 1993 1993 * Loads the nested-guest APIC state (currently just the TPR). … … 2146 2146 2147 2147 2148 #ifdef VBOX_WITH_NESTED_HWVIRT 2148 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2149 2149 /** 2150 2150 * Merges guest and nested-guest intercepts for executing the nested-guest using … … 2406 2406 pVmcb->guest.u64RAX = pCtx->rax; 2407 2407 2408 #ifdef VBOX_WITH_NESTED_HWVIRT 2408 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2409 2409 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable) 2410 2410 { … … 2449 2449 2450 2450 2451 #ifdef VBOX_WITH_NESTED_HWVIRT 2451 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2452 2452 /** 2453 2453 * Merges the guest and nested-guest MSR permission bitmap. … … 2628 2628 pVmcbNstGst->guest.u64RAX = pCtx->rax; 2629 2629 2630 #ifdef VBOX_WITH_NESTED_HWVIRT 2630 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2631 2631 Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable); /* Nested VGIF not supported yet. */ 2632 2632 #endif … … 2660 2660 return rc; 2661 2661 } 2662 #endif /* VBOX_WITH_NESTED_HWVIRT */2662 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */ 2663 2663 2664 2664 … … 2731 2731 2732 2732 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl; 2733 #ifdef VBOX_WITH_NESTED_HWVIRT 2733 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2734 2734 if (!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx)) 2735 2735 { … … 3495 3495 * VINTR intercept all being set. 
3496 3496 */ 3497 #ifdef VBOX_WITH_NESTED_HWVIRT 3497 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 3498 3498 /* 3499 3499 * Currently we don't overlay interupt windows and if there's any V_IRQ pending … … 3547 3547 } 3548 3548 3549 #ifdef VBOX_WITH_NESTED_HWVIRT 3549 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 3550 3550 /** 3551 3551 * Evaluates the event to be delivered to the nested-guest and sets it as the … … 3679 3679 Assert(pVmcb); 3680 3680 3681 #ifdef VBOX_WITH_NESTED_HWVIRT 3681 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 3682 3682 bool const fGif = pCtx->hwvirt.fGif; 3683 3683 #else … … 4099 4099 4100 4100 4101 #ifdef VBOX_WITH_NESTED_HWVIRT 4101 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4102 4102 /** 4103 4103 * Does the preparations before executing nested-guest code in AMD-V. … … 4120 4120 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx); 4121 4121 4122 #ifdef VBOX_WITH_NESTED_HWVIRT_ ONLY_IN_IEM4122 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM_ONLY_IN_IEM 4123 4123 Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n")); 4124 4124 return VINF_EM_RESCHEDULE_REM; … … 4468 4468 4469 4469 4470 #ifdef VBOX_WITH_NESTED_HWVIRT 4470 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4471 4471 /** 4472 4472 * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC … … 4790 4790 } 4791 4791 4792 #ifdef VBOX_WITH_NESTED_HWVIRT 4792 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4793 4793 /** 4794 4794 * Runs the nested-guest code using AMD-V. 
… … 4899 4899 uint32_t cLoops = 0; 4900 4900 int rc; 4901 #ifdef VBOX_WITH_NESTED_HWVIRT 4901 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4902 4902 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 4903 4903 #endif … … 4908 4908 rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops); 4909 4909 } 4910 #ifdef VBOX_WITH_NESTED_HWVIRT 4910 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4911 4911 else 4912 4912 { … … 4937 4937 4938 4938 4939 #ifdef VBOX_WITH_NESTED_HWVIRT 4939 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4940 4940 /** 4941 4941 * Determines whether an IOIO intercept is active for the nested-guest or not. … … 5554 5554 } 5555 5555 5556 #ifdef VBOX_WITH_NESTED_HWVIRT 5556 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 5557 5557 case SVM_EXIT_CLGI: return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient); 5558 5558 case SVM_EXIT_STGI: return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient); … … 6121 6121 6122 6122 6123 #ifdef VBOX_WITH_NESTED_HWVIRT 6123 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 6124 6124 /** 6125 6125 * Gets the length of the current instruction if the CPU supports the NRIP_SAVE … … 7544 7544 7545 7545 7546 #if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT )7546 #if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM) 7547 7547 /** 7548 7548 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT. … … 7588 7588 #endif 7589 7589 7590 #ifdef VBOX_WITH_NESTED_HWVIRT 7590 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 7591 7591 /** 7592 7592 * \#VMEXIT handler for #PF occuring while in nested-guest execution … … 7824 7824 } 7825 7825 7826 #endif /* VBOX_WITH_NESTED_HWVIRT */7826 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */ 7827 7827 7828 7828 -
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r71676 r72208 3933 3933 AssertLogRelRCReturn(rc, rc); 3934 3934 3935 #ifdef VBOX_WITH_NESTED_HWVIRT 3935 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 3936 3936 /** @cfgm{/CPUM/NestedHWVirt, bool, false} 3937 3937 * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest. -
trunk/src/VBox/VMM/VMMR3/EM.cpp
r72065 r72208 1656 1656 } 1657 1657 1658 #ifdef VBOX_WITH_NESTED_HWVIRT 1658 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1659 1659 /** 1660 1660 * Helper for emR3ForcedActions() for injecting interrupts into the … … 2077 2077 Assert(!HMR3IsEventPending(pVCpu)); 2078 2078 PCPUMCTX pCtx = pVCpu->em.s.pCtx; 2079 #ifdef VBOX_WITH_NESTED_HWVIRT 2079 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2080 2080 if (CPUMIsGuestInNestedHwVirtMode(pCtx)) 2081 2081 { … … 2096 2096 { 2097 2097 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 2098 #ifdef VBOX_WITH_NESTED_HWVIRT 2098 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2099 2099 && pCtx->hwvirt.fGif 2100 2100 #endif -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r72178 r72208 1085 1085 # endif 1086 1086 1087 #ifdef VBOX_WITH_NESTED_HWVIRT 1087 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1088 1088 /* 1089 1089 * Nested-guest Exit reason stats. -
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
r70979 r72208 248 248 * we still need to implement hypercalls rather than throw a #UD. 249 249 */ 250 #ifdef VBOX_WITH_NESTED_HWVIRT 250 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 251 251 if (pVM->cpum.ro.GuestFeatures.fSvm) 252 252 { -
trunk/src/VBox/VMM/include/HMInternal.h
r72178 r72208 286 286 /** In use by our code. (for power suspend) */ 287 287 volatile bool fInUse; 288 #ifdef VBOX_WITH_NESTED_HWVIRT 288 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 289 289 /** Nested-guest union (put data common to SVM/VMX outside the union). */ 290 290 union
Note: See TracChangeset for help on using the changeset viewer.