Changeset 71415 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Timestamp: Mar 21, 2018 9:29:22 AM (7 years ago)
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(diff between r71383 and r71415)

@@ -180 +180 @@
  * while executing the guest or nested-guest.
  */
 #define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS ( SVM_CTRL_INTERCEPT_INTR         \
                                                | SVM_CTRL_INTERCEPT_NMI         \
                                                | SVM_CTRL_INTERCEPT_INIT        \
                                                | SVM_CTRL_INTERCEPT_RDPMC       \
                                                | SVM_CTRL_INTERCEPT_CPUID       \
                                                | SVM_CTRL_INTERCEPT_RSM         \
                                                | SVM_CTRL_INTERCEPT_HLT         \
                                                | SVM_CTRL_INTERCEPT_IOIO_PROT   \
                                                | SVM_CTRL_INTERCEPT_MSR_PROT    \
                                                | SVM_CTRL_INTERCEPT_INVLPGA     \
                                                | SVM_CTRL_INTERCEPT_SHUTDOWN    \
                                                | SVM_CTRL_INTERCEPT_FERR_FREEZE \
                                                | SVM_CTRL_INTERCEPT_VMRUN       \
                                                | SVM_CTRL_INTERCEPT_SKINIT      \
                                                | SVM_CTRL_INTERCEPT_WBINVD      \
                                                | SVM_CTRL_INTERCEPT_MONITOR     \
                                                | SVM_CTRL_INTERCEPT_MWAIT       \
+                                               | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
                                                | SVM_CTRL_INTERCEPT_XSETBV)

@@ -360 +361 @@
 static FNSVMEXITHANDLER hmR0SvmExitIret;
 static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
-static FNSVMEXITHANDLER hmR0SvmExitXcptNM;
 static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
 static FNSVMEXITHANDLER hmR0SvmExitXcptMF;

@@ -951 +951 @@
                                 | SVM_CTRL_INTERCEPT_VMMCALL;

-    /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
+    /*
+     * CR0, CR4 reads/writes must be intercepted, as our shadow values may differ from the guest's.
+     * These interceptions might be relaxed later during VM execution if the conditions allow.
+     */
     pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
-
-    /* CR0, CR4 writes must be intercepted for the same reasons as above. */
     pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

@@ -1426 +1427 @@
 static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
-    uint64_t u64GuestCR0 = pCtx->cr0;
+    /* The guest FPU is now always pre-loaded before executing guest code, see @bugref{7243#c101}. */
+    Assert(CPUMIsGuestFPUStateActive(pVCpu));
+
+    uint64_t const uGuestCr0  = pCtx->cr0;
+    uint64_t       uShadowCr0 = uGuestCr0;

     /* Always enable caching. */
-    u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
+    uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
+    /* When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()). */
+    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
+    {
+        uShadowCr0 |= X86_CR0_PG   /* Use shadow page tables. */
+                   |  X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
+    }

     /*
-     * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
+     * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
+     *
+     * CR0 writes still needs interception as PGM requires tracking paging mode changes, see @bugref{6944}.
+     * We also don't ever want to honor weird things like cache disable from the guest. However, we can
+     * avoid intercepting changes to the TS & MP bits by clearing the CR0 write intercept below and keeping
+     * SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
      */
-    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
-    {
-        u64GuestCR0 |= X86_CR0_PG   /* When Nested Paging is not available, use shadow page tables. */
-                    |  X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
-    }
-
-    /*
-     * Guest FPU bits.
-     */
-    bool fInterceptNM = false;
-    bool fInterceptMF = false;
-    u64GuestCR0 |= X86_CR0_NE;  /* Use internal x87 FPU exceptions handling rather than external interrupts. */
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        /* Catch floating point exceptions if we need to report them to the guest in a different way. */
-        if (!(pCtx->cr0 & X86_CR0_NE))
-        {
-            Log4(("hmR0SvmLoadSharedCR0: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
-            fInterceptMF = true;
+    if (uShadowCr0 == uGuestCr0)
+    {
+        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        {
+            pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
+            pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
+            Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
+        }
+        else
+        {
+            /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
+            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+            Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
+            pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0))
+                                          | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0));
+            pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx & ~RT_BIT(0))
+                                          | (pVmcbNstGstCache->u16InterceptWrCRx & RT_BIT(0));
         }
     }
     else
     {
-        fInterceptNM = true;        /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
-        u64GuestCR0 |= X86_CR0_TS   /* Guest can task switch quickly and do lazy FPU syncing. */
-                    |  X86_CR0_MP;  /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
-    }
-
-    /*
-     * Update the exception intercept bitmap.
-     */
-    if (fInterceptNM)
-        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
-    else
-        hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_NM);
-
-    if (fInterceptMF)
-        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
-    else
-        hmR0SvmRemoveXcptIntercept(pVCpu, pCtx, pVmcb, X86_XCPT_MF);
-
-    pVmcb->guest.u64CR0 = u64GuestCR0;
-    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
+        pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
+        pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
+    }
+    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+
+    Assert(RT_HI_U32(uShadowCr0) == 0);
+    if (pVmcb->guest.u64CR0 != uShadowCr0)
+    {
+        pVmcb->guest.u64CR0 = uShadowCr0;
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
+    }
 }

@@ -2046 +2052 @@
      *    - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
      *      the nested-guest, the physical CPU raises a \#UD exception as expected.
+     *
+     *    - SVM_CTRL_INTERCEPT_CR0_SEL_WRITE: Is always required as we want to track PGM mode
+     *      changes and not honor cache disable changes even by the nested-guest.
      */
     pVmcbNstGst->ctrl.u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR

@@ -2318 +2327 @@
     PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
     PCSVMVMCBSTATESAVE  pVmcbNstGstState = &pVmcbNstGst->guest;
-    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;

     /*

@@ -2330 +2339 @@
     if (!fWasCached)
     {
-        pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
-        pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
-        pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
-        pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
-        pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
-        pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
-        pNstGstVmcbCache->u64CR0            = pVmcbNstGstState->u64CR0;
-        pNstGstVmcbCache->u64CR3            = pVmcbNstGstState->u64CR3;
-        pNstGstVmcbCache->u64CR4            = pVmcbNstGstState->u64CR4;
-        pNstGstVmcbCache->u64EFER           = pVmcbNstGstState->u64EFER;
-        pNstGstVmcbCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
-        pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
-        pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
-        pNstGstVmcbCache->u64TSCOffset      = pVmcbNstGstCtrl->u64TSCOffset;
-        pNstGstVmcbCache->u32VmcbCleanBits  = pVmcbNstGstCtrl->u32VmcbCleanBits;
-        pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
-        pNstGstVmcbCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
-        pNstGstVmcbCache->u1NestedPaging    = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
-        pNstGstVmcbCache->u1LbrVirt         = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
+        pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
+        pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
+        pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
+        pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
+        pVmcbNstGstCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
+        pVmcbNstGstCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
+        pVmcbNstGstCache->u64CR0            = pVmcbNstGstState->u64CR0;
+        pVmcbNstGstCache->u64CR3            = pVmcbNstGstState->u64CR3;
+        pVmcbNstGstCache->u64CR4            = pVmcbNstGstState->u64CR4;
+        pVmcbNstGstCache->u64EFER           = pVmcbNstGstState->u64EFER;
+        pVmcbNstGstCache->u64DBGCTL         = pVmcbNstGstState->u64DBGCTL;
+        pVmcbNstGstCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
+        pVmcbNstGstCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+        pVmcbNstGstCache->u64TSCOffset      = pVmcbNstGstCtrl->u64TSCOffset;
+        pVmcbNstGstCache->u32VmcbCleanBits  = pVmcbNstGstCtrl->u32VmcbCleanBits;
+        pVmcbNstGstCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
+        pVmcbNstGstCache->TLBCtrl           = pVmcbNstGstCtrl->TLBCtrl;
+        pVmcbNstGstCache->u1NestedPaging    = pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging;
+        pVmcbNstGstCache->u1LbrVirt         = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
         pCtx->hwvirt.svm.fHMCachedVmcb = true;
         Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));

@@ -2569 +2578 @@

     /*
-     * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
+     * Guest Control registers: CR0, CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
      */
     pMixedCtx->cr2 = pVmcb->guest.u64CR2;
+
+    /* If we're not intercepting changes to CR0 TS & MP bits, sync those bits here. */
+    if (!(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(0)))
+    {
+        pMixedCtx->cr0 = (pMixedCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
+                       | (pVmcb->guest.u64CR0 & (X86_CR0_TS | X86_CR0_MP));
+    }

     /*

@@ -4247 +4263 @@
    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);

-    if (   pVCpu->hm.s.fPreloadGuestFpu
-        && !CPUMIsGuestFPUStateActive(pVCpu))
-    {
+    if (!CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
         CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }

@@ -4359 +4377 @@
     hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);

-    if (   pVCpu->hm.s.fPreloadGuestFpu
-        && !CPUMIsGuestFPUStateActive(pVCpu))
-    {
+    if (!CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
         CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }

@@ -5134 +5154 @@
         }

-        case SVM_EXIT_EXCEPTION_7:   /* X86_XCPT_NM */
-        {
-            if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_NM))
-                return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-            hmR0SvmSetPendingXcptNM(pVCpu);
-            return VINF_SUCCESS;
-        }
-
         case SVM_EXIT_EXCEPTION_6:   /* X86_XCPT_UD */
         {

@@ -5154 +5166 @@
            if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
                return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-            hmR0SvmSetPendingXcptMF(pVCpu);
-            return VINF_SUCCESS;
+            return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
         }

@@ -5189 +5200 @@
         }

+        case SVM_EXIT_CR0_SEL_WRITE:
+        {
+            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
+                return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
+        }
+
         case SVM_EXIT_WRITE_CR0:
         case SVM_EXIT_WRITE_CR3:
         case SVM_EXIT_WRITE_CR4:
-        case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set? ?*/
+        case SVM_EXIT_WRITE_CR8:   /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set? */
         {
             uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;

@@ -5294 +5312 @@
         case SVM_EXIT_EXCEPTION_0:     /*case SVM_EXIT_EXCEPTION_1:*/   case SVM_EXIT_EXCEPTION_2:
         /*case SVM_EXIT_EXCEPTION_3:*/   case SVM_EXIT_EXCEPTION_4:     case SVM_EXIT_EXCEPTION_5:
-        /*case SVM_EXIT_EXCEPTION_6:*/ /*case SVM_EXIT_EXCEPTION_7:*/   case SVM_EXIT_EXCEPTION_8:
+        /*case SVM_EXIT_EXCEPTION_6:*/   case SVM_EXIT_EXCEPTION_7:     case SVM_EXIT_EXCEPTION_8:
         case SVM_EXIT_EXCEPTION_9:     case SVM_EXIT_EXCEPTION_10:      case SVM_EXIT_EXCEPTION_11:
         case SVM_EXIT_EXCEPTION_12:    case SVM_EXIT_EXCEPTION_13:    /*case SVM_EXIT_EXCEPTION_14:*/

@@ -5476 +5494 @@
                 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);

-            case SVM_EXIT_EXCEPTION_7:  /* X86_XCPT_NM */
-                return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
-
             case SVM_EXIT_EXCEPTION_6:  /* X86_XCPT_UD */
                 return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);

@@ -5508 +5523 @@
                 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);

+            case SVM_EXIT_CR0_SEL_WRITE:
             case SVM_EXIT_WRITE_CR0:
             case SVM_EXIT_WRITE_CR3:
             case SVM_EXIT_WRITE_CR4:
             case SVM_EXIT_WRITE_CR8:
             {
-                uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
+                uint8_t const uCr = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : uExitCode - SVM_EXIT_WRITE_CR0;
                 Log4(("hmR0SvmHandleExit: Write CR%u\n", uCr));  NOREF(uCr);
                 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);

@@ -5615 +5631 @@
             case SVM_EXIT_EXCEPTION_5:             /* X86_XCPT_BR */
             /* SVM_EXIT_EXCEPTION_6: */            /* X86_XCPT_UD - Handled above. */
-            /* SVM_EXIT_EXCEPTION_7: */            /* X86_XCPT_NM - Handled above. */
+            case SVM_EXIT_EXCEPTION_7:             /* X86_XCPT_NM */
            case SVM_EXIT_EXCEPTION_8:             /* X86_XCPT_DF */
            case SVM_EXIT_EXCEPTION_9:             /* X86_XCPT_CO_SEG_OVERRUN */

@@ -6521 +6537 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();

-    uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0;
+    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
+    uint8_t  const iCrReg    = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0);
     Assert(iCrReg <= 15);

@@ -7375 +7392 @@

 /**
- * \#VMEXIT handler for device-not-available exceptions (SVM_EXIT_EXCEPTION_7).
- * Conditional \#VMEXIT.
- */
-HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-
-    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);
-
-    /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
-    VMMRZCallRing3Disable(pVCpu);
-    HM_DISABLE_PREEMPT();
-
-    int rc;
-    /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
-    if (pSvmTransient->fWasGuestFPUStateActive)
-    {
-        rc = VINF_EM_RAW_GUEST_TRAP;
-        Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
-    }
-    else
-    {
-#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
-        Assert(!pSvmTransient->fWasGuestFPUStateActive);
-#endif
-        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu); /* (No need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
-        Assert(   rc == VINF_EM_RAW_GUEST_TRAP
-               || ((rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED) && CPUMIsGuestFPUStateActive(pVCpu)));
-    }
-
-    HM_RESTORE_PREEMPT();
-    VMMRZCallRing3Enable(pVCpu);
-
-    if (rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED)
-    {
-        /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
-        pVCpu->hm.s.fPreloadGuestFpu = true;
-    }
-    else
-    {
-        /* Forward #NM to the guest. */
-        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
-        hmR0SvmSetPendingXcptNM(pVCpu);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
-    }
-    return VINF_SUCCESS;
-}
-
-
-/**
  * \#VMEXIT handler for undefined opcode (SVM_EXIT_EXCEPTION_6).
  * Conditional \#VMEXIT.

@@ -7481 +7444 @@

     /* Paranoia; Ensure we cannot be called as a result of event delivery. */
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);

@@ -7494 +7457 @@
     if (RT_SUCCESS(rc))
     {
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
+            && HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
+        {
+            return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_FERR_FREEZE, 0, 0));
+        }
+#endif
         /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
-        /** @todo FERR intercept when in nested-guest mode? */
-        rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
+        rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
         if (RT_SUCCESS(rc))
             pCtx->rip += cbOp;
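
The net effect of the CR0 changes above is that, once the CR0 write intercept is dropped in favour of SVM_CTRL_INTERCEPT_CR0_SEL_WRITE, the guest can toggle CR0.TS and CR0.MP without causing a #VMEXIT, so those two bits must be merged back from the VMCB after guest execution. The following is a minimal, self-contained sketch of that TS/MP merge, not the VMM code itself; the function name, the plain uint64_t state and the sample values are illustrative only.

/* cr0_ts_mp_sync_sketch.c - illustrative only, simplified from the changeset above. */
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_MP  UINT64_C(0x0002)
#define X86_CR0_TS  UINT64_C(0x0008)

/* Merge the TS and MP bits the guest may have changed (visible in the VMCB guest CR0)
   back into the hypervisor's tracked guest CR0, leaving every other bit untouched. */
static uint64_t SyncCr0TsMpFromVmcb(uint64_t uTrackedCr0, uint64_t uVmcbGuestCr0)
{
    return (uTrackedCr0 & ~(X86_CR0_TS | X86_CR0_MP))
         | (uVmcbGuestCr0 & (X86_CR0_TS | X86_CR0_MP));
}

int main(void)
{
    uint64_t const uTracked = UINT64_C(0x80000031);       /* PG, NE, ET, PE set; TS/MP clear. */
    uint64_t const uVmcb    = uTracked | X86_CR0_TS;      /* Guest set TS without a #VMEXIT. */
    printf("synced CR0 = %#llx\n", (unsigned long long)SyncCr0TsMpFromVmcb(uTracked, uVmcb));
    return 0;
}

Compiled standalone, the example prints 0x80000039, i.e. the tracked CR0 with only the TS bit taken over from the VMCB value, mirroring what the new code in hmR0SvmSaveGuestState does for CR0 when the write intercept is not set.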