Timestamp: Jun 12, 2013 3:22:14 PM
Location:  trunk
Files:     2 edited
trunk/include/VBox/vmm/hm_svm.h
(r46503 → r46512)

  */
 /** Invalid guest state in VMCB. */
-#define SVM_EXIT_INVALID                -1
+#define SVM_EXIT_INVALID                (-1)
 /** Read from CR0-CR15. */
 #define SVM_EXIT_READ_CR0               0x0
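Parenthesising the negative constant is defensive macro hygiene; the value itself is the "invalid exit code" sentinel that the HMSVMR0.cpp changes below assert against before dispatching a #VMEXIT. Below is a minimal standalone sketch of that comparison; DEMOTRANSIENT and demoIsValidExitCode are hypothetical names used only for illustration, while SVM_EXIT_INVALID and SVM_EXIT_NPF mirror the header.

#include <stdint.h>
#include <stdio.h>

/* Same definition as the header after this change. */
#define SVM_EXIT_INVALID    (-1)
#define SVM_EXIT_NPF        0x400   /* Nested page fault #VMEXIT code, per the AMD manual. */

/* Hypothetical, simplified stand-in for the SVMTRANSIENT exit-code field. */
typedef struct DEMOTRANSIENT
{
    uint64_t u64ExitCode;
} DEMOTRANSIENT;

/* The -1 sentinel widens to UINT64_MAX when compared against a uint64_t,
   so the check below matches exactly the "invalid" encoding. */
static int demoIsValidExitCode(const DEMOTRANSIENT *pTransient)
{
    return pTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID;
}

int main(void)
{
    DEMOTRANSIENT Ok  = { SVM_EXIT_NPF };
    DEMOTRANSIENT Bad = { (uint64_t)SVM_EXIT_INVALID };
    printf("NPF valid: %d, invalid sentinel valid: %d\n",
           demoIsValidExitCode(&Ok), demoIsValidExitCode(&Bad));
    return 0;
}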
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r46508 → r46512)

 *   Defined Constants And Macros   *
 *******************************************************************************/
+#ifdef VBOX_WITH_STATISTICS
+# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
+        if ((u64ExitCode) == SVM_EXIT_NPF) \
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
+        else \
+            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
+        } while (0)
+#else
+# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
+#endif
+
 /** @name Segment attribute conversion between CPU and AMD-V VMCB format.
  *
…
 #endif

-    /** The #VMEXIT exit code. */
+    /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
     uint64_t        u64ExitCode;
 } SVMTRANSIENT, *PSVMTRANSIENT;
…
     /** Reading this MSR does not cause a VM-exit. */
     SVMMSREXIT_PASSTHRU_READ
-} VMXMSREXITREAD;
+} SVMMSREXITREAD;

 /**
…
     /** Writing to this MSR does not cause a VM-exit. */
     SVMMSREXIT_PASSTHRU_WRITE
-} VMXMSREXITWRITE;
+} SVMMSREXITWRITE;


…
 *******************************************************************************/
 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
+
+DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);


…
     ASMBitClear(pbMsrBitmap, ulBit + 1);

-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;

 }
…


     /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from memory. */
-    pVmcb->u64VmcbCleanBits = 0;
+    pVmcb->ctrl.u64VmcbCleanBits = 0;

     /* The guest ASID MBNZ, set it to 1. The host uses 0. */
…
     {
         pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;


…
     {
         pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 }
…
 DECLINLINE(void) hmR0SvmRemoveXcptIntercept(uint32_t u32Xcpt)
 {
-#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
+#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
     {
         pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 #endif
…

     pVmcb->guest.u64CR0 = u64GuestCR0;
-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
 }
…
     {
         pVmcb->guest.u64CR2 = pCtx->cr2;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
     }
…

         pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
         Assert(pVmcb->ctrl.u64NestedPagingCR3);
         pVmcb->guest.u64CR3 = pCtx->cr3;
…
         pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);

-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
     pVCpu->hm.s.fContextUseFlags &= HM_CHANGED_GUEST_CR3;
 }
…


     pVmcb->guest.u64CR4 = u64GuestCR4;
-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
 }
…
     HMSVM_LOAD_SEG_REG(GS, cs);

-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
 }
…
         pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
         pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     }
…
         pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
         pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     }
…
     pVmcb->guest.u64DR7 = pCtx->dr[7];
     pVmcb->guest.u64DR6 = pCtx->dr[6];
-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;

     bool fInterceptDB = false;
…
         pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
         pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
     }
     Assert(CPUMIsHyperDebugStateActive(pVCpu));
…
             pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
             pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
-            pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
…
             pVmcb->ctrl.u16InterceptRdDRx = 0;
             pVmcb->ctrl.u16InterceptWrDRx = 0;
-            pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
…
 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    AssertPtr(pVCpu);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     NOREF(pVM);
+    NOREF(pVCpu);
     NOREF(pCtx);

-    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-
-    /** -xxx- todo. */
-
+    /* Nothing to do here. Everything is taken care of in hmR0SvmLongJmpToRing3(). */
     return VINF_SUCCESS;
 }
…

     /*
-     * Save all the MSRs that can be changed by the guest without causing a world switch.
-     * FS & GS base are saved with HMSVM_SAVE_SEG_REG.
+     * Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
+     */
+    pMixedCtx->cr2 = pVmcb->guest.u64CR2;
+
+    /*
+     * Guest MSRs.
      */
     pMixedCtx->msrSTAR = pVmcb->guest.u64STAR;          /* legacy syscall eip, cs & ss */
…
     pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;

-    /* Can be updated behind our back in the nested paging case. */
-    pMixedCtx->cr2 = pVmcb->guest.u64CR2;
-
-    /* Segment registers: CS, SS, DS, ES, FS, GS. */
+    /*
+     * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
+     */
     HMSVM_SAVE_SEG_REG(CS, ss);
     HMSVM_SAVE_SEG_REG(SS, cs);
…

     /*
-     * Descriptor Table Registers: TR, IDTR, GDTR, LDTR.
+     * Guest Descriptor-Table registers.
      */
     HMSVM_SAVE_SEG_REG(TR, tr);
     HMSVM_SAVE_SEG_REG(LDTR, ldtr);
-
     pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
     pMixedCtx->gdtr.pGdt  = pVmcb->guest.GDTR.u64Base;
…

     /*
-     * Debug registers.
+     * Guest Debug registers.
      */
     pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
…
     /*
      * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
+     * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
      */
     if (   pVM->hm.s.fNestedPaging
…
     {
         CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
-        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);   /* This may longjmp to ring-3 hence done at the very end. */
+        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
     }
 }
…
     Assert(VMMR0IsLogFlushDisabled(pVCpu));

-    /* Restore FPU state if necessary and resync on next R0 reentry .*/
+    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
     if (CPUMIsGuestFPUStateActive(pVCpu))
     {
…
     }

-    /* Restore debug registers if necessary and resync on next R0 reentry. */
+    /* Restore host debug registers if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+}
+
+
+/**
+ * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
+ * any remaining host state) before we longjump to ring-3 and possibly get
+ * preempted.
+ *
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   enmOperation    The operation causing the ring-3 longjump.
+ * @param   pvUser          The user argument (pointer to the possibly
+ *                          out-of-date guest-CPU context).
+ *
+ * @remarks Must never be called with @a enmOperation ==
+ *          VMMCALLRING3_VM_R0_ASSERTION.
+ */
+DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
+{
+    /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion, */
+    Assert(pVCpu);
+    Assert(pvUser);
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    VMMRZCallRing3Disable(pVCpu);
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
+    Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
+    hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
+    VMMRZCallRing3Enable(pVCpu);
 }

…
     }

-    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 }

…

     /* Refer AMD spec. 15.20 "Event Injection" for the format. */
-    uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
     if (enmTrpmEvent == TRPM_TRAP)
     {
…
  * @param   pvCpu   Pointer to the VMCPU.
  */
-static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
+static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
 {
     Assert(pVCpu->hm.s.Event.fPending);
…
     else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
     {
-        AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
+        AssertMsg(   uVectorType == SVM_EVENT_SOFTWARE_INT
                   || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
                   ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
…
         pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 }
…
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
     if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     {
         HMDumpRegs(pVM, pVCpu, pCtx);
 #ifdef VBOX_STRICT
+        Log4(("ctrl.u64VmcbCleanBits %#RX64\n", pVmcb->ctrl.u64VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
         Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
…
     {
         /* Check force flag actions that might require us to go back to ring-3. */
-        int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pCtx);
+        int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
         if (rc != VINF_SUCCESS)
             return rc;
…

     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    pVmcb->u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;           /* Mark the VMCB-state cache as unmodified by VMM. */
+    pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;      /* Mark the VMCB-state cache as unmodified by VMM. */

     /* Restore host's TSC_AUX if required. */
…

     ASMSetFlags(pSvmTransient->uEFlags);        /* Enable interrupts. */
-    VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
+    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pMixedCtx);
     VMMRZCallRing3Enable(pVCpu);                /* It is now safe to do longjmps to ring-3!!! */

…

         /* Handle the #VMEXIT. */
-        AssertMsg(SvmTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
-        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
-
-        /* -xxx- todo. */
-
+        AssertMsg(SvmTransient.u64ExitCode != SVM_EXIT_INVALID, ("%#x\n", SvmTransient.u64ExitCode));
+        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
+        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
…
     }

+
+/**
+ * Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
+ *
+ * @returns VBox status code (informational status codes included).
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   pCtx            Pointer to the guest-CPU context.
+ * @param   pSvmTransient   Pointer to the SVM transient structure.
+ */
+DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient)
+{
+    int rc;
+    uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
+    switch (u32ExitCode)
+    {
+
+    }
+    return rc;
+
+}
+
+#ifdef DEBUG
+/* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */
+# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
+    RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
+
+# define HMSVM_ASSERT_PREEMPT_CPUID() \
+   do \
+   { \
+        RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
+        AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
+   } while (0)
+
+# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
+            do { \
+                AssertPtr(pVCpu); \
+                AssertPtr(pMixedCtx); \
+                AssertPtr(pSvmTransient); \
+                Assert(ASMIntAreEnabled()); \
+                Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
+                HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
+                Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
+                Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
+                if (VMMR0IsLogFlushDisabled(pVCpu)) \
+                    HMSVM_ASSERT_PREEMPT_CPUID(); \
+                HMSVM_STOP_EXIT_DISPATCH_PROF(); \
+            } while (0)
+#else   /* Release builds */
+# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
+#endif
+
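Most hunks above repeat one pattern: whenever the VMM writes a guest-state field into the VMCB, the matching VMCB clean bit is cleared with pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_XXX, and the clean-bits field now sits under the control area (hence the ctrl. prefix). Below is a standalone sketch of that dirty-tracking idea; the DEMO_* names, bit values and trimmed-down structures are made up for illustration, only the field name and the clear-on-write pattern mirror the changeset.

#include <stdint.h>
#include <stdio.h>

/* Illustrative clean-bit masks; the real HMSVM_VMCB_CLEAN_* values live in HMSVMR0.cpp. */
#define DEMO_VMCB_CLEAN_CRX     UINT64_C(0x0000000000000020)
#define DEMO_VMCB_CLEAN_DRX     UINT64_C(0x0000000000000040)
#define DEMO_VMCB_CLEAN_ALL     UINT64_C(0x000000000000006f)

/* Trimmed-down stand-ins for the VMCB control and guest-state areas. */
typedef struct DEMOVMCBCTRL  { uint64_t u64VmcbCleanBits; } DEMOVMCBCTRL;
typedef struct DEMOVMCBSTATE { uint64_t u64CR0, u64DR7;   } DEMOVMCBSTATE;
typedef struct DEMOVMCB
{
    DEMOVMCBCTRL  ctrl;     /* The clean bits live in the control area, hence pVmcb->ctrl.u64VmcbCleanBits. */
    DEMOVMCBSTATE guest;
} DEMOVMCB;

/* Writing a guest field invalidates the matching cached VMCB area so the CPU reloads it on the next VMRUN. */
static void demoLoadGuestCR0(DEMOVMCB *pVmcb, uint64_t uGuestCR0)
{
    pVmcb->guest.u64CR0 = uGuestCR0;
    pVmcb->ctrl.u64VmcbCleanBits &= ~DEMO_VMCB_CLEAN_CRX;
}

int main(void)
{
    DEMOVMCB Vmcb = { { DEMO_VMCB_CLEAN_ALL }, { 0, 0 } };  /* Everything cached/clean after a run. */
    demoLoadGuestCR0(&Vmcb, 0x80010031);                    /* Touch CR0: the CRX area must be reloaded. */
    printf("clean bits now %#llx\n", (unsigned long long)Vmcb.ctrl.u64VmcbCleanBits);
    return 0;
}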
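The new HMSVM_EXITCODE_STAM_COUNTER_INC macro special-cases SVM_EXIT_NPF, presumably because the nested-page-fault exit code (0x400) would alias a low exit code once masked with MASK_EXITREASON_STAT to fit the fixed-size per-reason counter array. A self-contained sketch of that bucketing logic, using a plain array and an illustrative mask in place of the STAM counters (the DEMO_* names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define DEMO_SVM_EXIT_NPF       0x400   /* Nested page fault #VMEXIT code. */
#define DEMO_MASK_EXITREASON    0xff    /* Illustrative stand-in for MASK_EXITREASON_STAT. */

static uint32_t g_acExitReason[DEMO_MASK_EXITREASON + 1];   /* One bucket per masked exit code. */
static uint32_t g_cExitReasonNpf;                           /* Dedicated bucket for nested page faults. */

/* Same shape as HMSVM_EXITCODE_STAM_COUNTER_INC: NPF gets its own counter,
   everything else is masked into the fixed-size per-reason array. */
static void demoCountExit(uint64_t u64ExitCode)
{
    if (u64ExitCode == DEMO_SVM_EXIT_NPF)
        g_cExitReasonNpf++;
    else
        g_acExitReason[u64ExitCode & DEMO_MASK_EXITREASON]++;
}

int main(void)
{
    demoCountExit(0x60);                    /* e.g. a physical interrupt #VMEXIT */
    demoCountExit(DEMO_SVM_EXIT_NPF);
    demoCountExit(DEMO_SVM_EXIT_NPF);
    printf("exit 0x60: %u, NPF exits: %u\n", g_acExitReason[0x60], g_cExitReasonNpf);
    return 0;
}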