Changeset 71529 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Timestamp: Mar 28, 2018 6:32:43 AM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r71504 → r71529

 *   Internal Functions                                                                  *
 *****************************************************************************************/
-static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
+static void hmR0SvmSetMsrPermission(PCPUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
                                     SVMMSREXITWRITE enmWrite);
 static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
…
 /** Ring-0 memory object for the IO bitmap. */
-RTR0MEMOBJ                  g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
+static RTR0MEMOBJ           g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
 /** Physical address of the IO bitmap. */
-RTHCPHYS                    g_HCPhysIOBitmap = 0;
+static RTHCPHYS             g_HCPhysIOBitmap;
 /** Pointer to the IO bitmap. */
-R0PTRTYPE(void *)           g_pvIOBitmap = NULL;
-
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/** Ring-0 memory object for the nested-guest MSRPM bitmap. */
-RTR0MEMOBJ                  g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
-/** Physical address of the nested-guest MSRPM bitmap. */
-RTHCPHYS                    g_HCPhysNstGstMsrBitmap = 0;
-/** Pointer to the nested-guest MSRPM bitmap. */
-R0PTRTYPE(void *)           g_pvNstGstMsrBitmap = NULL;
-#endif
+static R0PTRTYPE(void *)    g_pvIOBitmap;
…
 #ifdef VBOX_STRICT
 # define HMSVM_LOG_CS             RT_BIT_32(0)
 # define HMSVM_LOG_SS             RT_BIT_32(1)
 # define HMSVM_LOG_FS             RT_BIT_32(2)
 # define HMSVM_LOG_GS             RT_BIT_32(3)
 # define HMSVM_LOG_LBR            RT_BIT_32(4)
 # define HMSVM_LOG_ALL            (   HMSVM_LOG_CS \
                                    |  HMSVM_LOG_SS \
                                    |  HMSVM_LOG_FS \
                                    |  HMSVM_LOG_GS \
                                    |  HMSVM_LOG_LBR)

 /**
- * Dumps CPU state and additional info. to the logger for diagnostics.
+ * Dumps virtual CPU state and additional info. to the logger for diagnostics.
  *
  * @param   pVCpu   The cross context virtual CPU structure.
…
     NOREF(pVmcbGuest);
 }
-#endif
+#endif /* VBOX_STRICT */
…
     /*
-     * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
-     * once globally here instead of per-VM.
+     * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
+     * intercept all IO accesses, it's done once globally here instead of per-VM.
      */
     Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
…
     ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));

-#ifdef VBOX_WITH_NESTED_HWVIRT
-    /*
-     * Allocate 8 KB for the MSR permission bitmap for the nested-guest.
-     */
-    Assert(g_hMemObjNstGstMsrBitmap == NIL_RTR0MEMOBJ);
-    rc = RTR0MemObjAllocCont(&g_hMemObjNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    g_pvNstGstMsrBitmap     = RTR0MemObjAddress(g_hMemObjNstGstMsrBitmap);
-    g_HCPhysNstGstMsrBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjNstGstMsrBitmap, 0 /* iPage */);
-
-    /* Set all bits to intercept all MSR accesses. */
-    ASMMemFill32(g_pvNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
-#endif
-
     return VINF_SUCCESS;
 }
…
         g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
     }
-
-#ifdef VBOX_WITH_NESTED_HWVIRT
-    if (g_hMemObjNstGstMsrBitmap != NIL_RTR0MEMOBJ)
-    {
-        RTR0MemObjFree(g_hMemObjNstGstMsrBitmap, true /* fFreeMappings */);
-        g_pvNstGstMsrBitmap      = NULL;
-        g_HCPhysNstGstMsrBitmap  = 0;
-        g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
-    }
-#endif
 }
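For context on the 12 KB figure: the SVM I/O permission map dedicates one intercept bit per port, and a multi-byte access tests one bit per byte it touches, so the map needs 64K+3 bits, which rounds up to three 4K pages. A hedged sketch of the conceptual lookup (illustrative only; the function name is made up and this is not VirtualBox code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Conceptual IOPM test: one intercept bit per I/O port, and an access of
     * cbAccess bytes at uPort tests the bit for each byte it touches. Ports
     * near 0xffff can index up to bit 64K+2, which is why the map holds
     * 64K+3 bits, rounded up to 12 KB (3 pages). */
    bool svmIsIoAccessIntercepted(uint8_t const *pbIopm, uint16_t uPort, uint8_t cbAccess)
    {
        for (uint8_t i = 0; i < cbAccess; i++)
        {
            uint32_t const iBit = (uint32_t)uPort + i;  /* no 16-bit wrap-around */
            if (pbIopm[iBit / 8] & (uint8_t)(1 << (iBit % 8)))
                return true;   /* at least one touched port is intercepted */
        }
        return false;
    }

Since this changeset keeps every bit set (the ASMMemFill32 with 0xffffffff above), any such lookup intercepts, which is why one global bitmap suffices for all VMs.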
…
 /**
- * Sets the permission bits for the specified MSR in the MSRPM.
+ * Sets the permission bits for the specified MSR in the MSRPM bitmap.
  *
- * @param   pVmcb       Pointer to the VM control block.
+ * @param   pCtx        Pointer to the guest-CPU or nested-guest-CPU context.
  * @param   pbMsrBitmap Pointer to the MSR bitmap.
- * @param   uMsr        The MSR for which the access permissions are being set.
+ * @param   idMsr       The MSR for which the permissions are being set.
  * @param   enmRead     MSR read permissions.
  * @param   enmWrite    MSR write permissions.
+ *
+ * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
+ *          caller needs to take care of this.
  */
-static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
+static void hmR0SvmSetMsrPermission(PCPUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
                                     SVMMSREXITWRITE enmWrite)
 {
-    uint16_t offMsrpm;
-    uint32_t uMsrpmBit;
-    int rc = HMSvmGetMsrpmOffsetAndBit(uMsr, &offMsrpm, &uMsrpmBit);
+    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
+    uint16_t   offMsrpm;
+    uint8_t    uMsrpmBit;
+    int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
     AssertRC(rc);

-    Assert(uMsrpmBit < 0x3fff);
+    Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
     Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
…
         ASMBitSet(pbMsrBitmap, uMsrpmBit);
     else
-        ASMBitClear(pbMsrBitmap, uMsrpmBit);
+    {
+        if (!fInNestedGuestMode)
+            ASMBitClear(pbMsrBitmap, uMsrpmBit);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        else
+        {
+            /* Only clear the bit if the nested-guest is also not intercepting the MSR read. */
+            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+            pbNstGstMsrBitmap += offMsrpm;
+            if (!ASMBitTest(pbNstGstMsrBitmap, uMsrpmBit))
+                ASMBitClear(pbMsrBitmap, uMsrpmBit);
+            else
+                Assert(ASMBitTest(pbMsrBitmap, uMsrpmBit));
+        }
+#endif
+    }

     if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
         ASMBitSet(pbMsrBitmap, uMsrpmBit + 1);
     else
-        ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
-
-    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+    {
+        if (!fInNestedGuestMode)
+            ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        else
+        {
+            /* Only clear the bit if the nested-guest is also not intercepting the MSR write. */
+            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+            pbNstGstMsrBitmap += offMsrpm;
+            if (!ASMBitTest(pbNstGstMsrBitmap, uMsrpmBit + 1))
+                ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
+            else
+                Assert(ASMBitTest(pbMsrBitmap, uMsrpmBit + 1));
+        }
+#endif
+    }
 }
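The two asserts encode the MSRPM layout from the AMD64 APM: two bits per MSR (read intercept, then write intercept), packed four MSRs per byte, in three 0x800-byte regions covering MSR ranges 0x00000000-0x00001fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff; bytes from offset 0x1800 onward are reserved. The changeset doesn't show HMSvmGetMsrpmOffsetAndBit itself, so here is a hedged sketch of a lookup consistent with that layout (illustrative name and code, not the real helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative MSRPM lookup per the AMD64 APM layout: each of the three
     * architectural MSR ranges maps to a 0x800-byte region of the bitmap,
     * 2 bits per MSR (even bit: read intercept, odd bit: write intercept). */
    bool svmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *poffMsrpm, uint8_t *puMsrpmBit)
    {
        uint32_t uRegionBase;  /* first MSR of the region */
        uint16_t offRegion;    /* byte offset of the region in the MSRPM */

        if (idMsr <= 0x00001fff)
        {
            uRegionBase = 0x00000000;
            offRegion   = 0x0000;
        }
        else if (idMsr >= 0xc0000000 && idMsr <= 0xc0001fff)
        {
            uRegionBase = 0xc0000000;
            offRegion   = 0x0800;
        }
        else if (idMsr >= 0xc0010000 && idMsr <= 0xc0011fff)
        {
            uRegionBase = 0xc0010000;
            offRegion   = 0x1000;
        }
        else
            return false;  /* MSRs outside the three ranges are not covered. */

        /* 2 bits per MSR => 4 MSRs per byte. */
        uint32_t const idxMsr = idMsr - uRegionBase;
        *poffMsrpm  = offRegion + (uint16_t)(idxMsr / 4);
        *puMsrpmBit = (uint8_t)((idxMsr % 4) * 2);  /* 0, 2, 4 or 6, matching the assert */
        return true;
    }

Note how the result always lands below byte offset 0x1800, matching both the reserved-bytes comment later in this changeset and the offMsrpm assert above.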
…
      */
     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
 }
…
             /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
             if (fPendingIntr)
-                hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+                hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
             else
             {
-                hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+                hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
                 pVCpu->hm.s.svm.fSyncVTpr = true;
             }
+            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
         }
         else
…
 #ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Merges the guest and nested-guest MSR permission bitmap.
+ *
+ * If the guest is intercepting an MSR we need to intercept it regardless of
+ * whether the nested-guest is intercepting it or not.
+ *
+ * @param   pHostCpu    Pointer to the physical CPU HM info. struct.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the nested-guest-CPU context.
+ */
+static void hmR0SvmMergeMsrpm(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
+    uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+    uint64_t       *pu64DstMsrpm    = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
+
+    /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
+    uint32_t const offRsvdQwords = 0x1800 >> 3;
+    for (uint32_t i = 0; i < offRsvdQwords; i++)
+        pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
+}
+
+
 /**
  * Caches the nested-guest VMCB fields before we modify them for execution using
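The merge works because a set MSRPM bit means "intercept": OR-ing the two bitmaps produces the union of the intercepts, so an access is passed through only when both the outer guest and the nested-guest leave the corresponding bit clear. A tiny self-contained demonstration of that invariant (made-up bit values, not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* One qword of an MSRPM: bits 0/1 = read/write intercept for MSR #0,
           bits 2/3 for MSR #1, and so on (2 bits per MSR). */
        uint64_t const uGst    = UINT64_C(0x2);  /* guest intercepts writes of MSR #0       */
        uint64_t const uNstGst = UINT64_C(0x1);  /* nested-guest intercepts reads of MSR #0 */
        uint64_t const uMerged = uGst | uNstGst;

        assert(uMerged & UINT64_C(0x1));     /* reads intercepted: nested-guest asked      */
        assert(uMerged & UINT64_C(0x2));     /* writes intercepted: outer guest asked      */
        assert(!(uMerged & UINT64_C(0xc)));  /* MSR #1 stays pass-through: neither asked   */
        return 0;
    }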
…
      * The IOPM of the nested-guest can be ignored because the guest always
      * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
-     * into the nested-guest one and swap it back on the #VMEXIT.
+     * than the nested-guest IOPM and swap the field back on the #VMEXIT.
      */
     pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
-
-    /*
-     * Load the host-physical address into the MSRPM rather than the nested-guest
-     * physical address (currently we trap all MSRs in the nested-guest).
-     */
-    pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;

     /*
      * Use the same nested-paging as the "outer" guest. We can't dynamically
      * switch off nested-paging suddenly while executing a VM (see assertion at the
-     * end of Trap0eHandler in PGMAllBth.h).
+     * end of Trap0eHandler() in PGMAllBth.h).
      */
     pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
…
     {
         Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
-        Assert(pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap);
         Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
     }
…
         return rc;
     }
-#endif
+#endif /* VBOX_WITH_NESTED_HWVIRT */
…
     hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);

+    /* Pre-load the guest FPU state. */
     if (!CPUMIsGuestFPUStateActive(pVCpu))
     {
…
     AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

+    PHMGLOBALCPUINFO pHostCpu     = hmR0GetCurrentCpu();
+    RTCPUID const    idCurrentCpu = pHostCpu->idCpu;
+    bool const       fMigratedCpu = idCurrentCpu != pVCpu->hm.s.idLastCpu;
+
     /* Setup TSC offsetting. */
-    RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
     if (   pSvmTransient->fUpdateTscOffsetting
-        || idCurrentCpu != pVCpu->hm.s.idLastCpu)  /** @todo is this correct for nested-guests where
-                                                       nested-VCPU<->physical-CPU mapping doesn't exist. */
+        || fMigratedCpu)
     {
         hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
…
     /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
-    if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
+    if (fMigratedCpu)
         pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
…
     pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
+
+    /* Merge the guest and nested-guest MSRPM. */
+    hmR0SvmMergeMsrpm(pHostCpu, pVCpu, pCtx);
+
+    /* Update the nested-guest VMCB to use the newly merged MSRPM. */
+    pVmcbNstGst->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;

     /* The TLB flushing would've already been setup by the nested-hypervisor. */
…
         && !(pVmcbNstGst->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
     {
-        hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+
         pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
         uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
…
     else
     {
-        hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
         pSvmTransient->fRestoreTscAuxMsr = false;
     }
…
         && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
     {
-        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+
         pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
         uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
…
     else
     {
-        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
         pSvmTransient->fRestoreTscAuxMsr = false;
     }
…
     const uint16_t    u16Port       = pIoExitInfo->n.u16Port;
     const SVMIOIOTYPE enmIoType     = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
-    const uint8_t     cbReg         = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT)& 7;
+    const uint8_t     cbReg         = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
     const uint8_t     cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
     const uint8_t     iEffSeg       = pIoExitInfo->n.u3SEG;
…
     uint32_t const idMsr = pCtx->ecx;
     uint16_t offMsrpm;
-    uint32_t uMsrpmBit;
+    uint8_t  uMsrpmBit;
     int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
     if (RT_SUCCESS(rc))
     {
-        void const *pvMsrBitmap    = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-        bool const fInterceptRead  = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
-        bool const fInterceptWrite = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
+        Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
+        Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
+
+        uint8_t const *pbMsrBitmap = (uint8_t const *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+        pbMsrBitmap += offMsrpm;
+        bool const fInterceptRead  = ASMBitTest(pbMsrBitmap, uMsrpmBit);
+        bool const fInterceptWrite = ASMBitTest(pbMsrBitmap, uMsrpmBit + 1);

         if (   (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
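Tying the last hunk together: given the nested-guest MSRPM, the #VMEXIT decision for an MSR access reduces to testing one of the two bits selected above, with EXITINFO1 distinguishing reads from writes. A hedged sketch building on the earlier svmGetMsrpmOffsetAndBit() illustration (hypothetical helper names, not the code path VirtualBox actually uses):

    #include <stdbool.h>
    #include <stdint.h>

    /* From the earlier sketch. */
    bool svmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *poffMsrpm, uint8_t *puMsrpmBit);

    /* Sketch: would a nested-guest RDMSR/WRMSR of idMsr cause a #VMEXIT?
     * pbNstGstMsrpm is the nested-guest's MSR permission bitmap; fWrite
     * mirrors the SVM_EXIT1_MSR_WRITE check on EXITINFO1 in the hunk above. */
    bool svmIsNstGstMsrIntercepted(uint8_t const *pbNstGstMsrpm, uint32_t idMsr, bool fWrite)
    {
        uint16_t offMsrpm;
        uint8_t  uMsrpmBit;
        if (!svmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit))
            return true; /* treat MSRs outside the covered ranges as intercepted */

        /* Read intercept is the even bit, write intercept the odd bit after it;
         * uMsrpmBit is 0, 2, 4 or 6, so both bits live in the byte at offMsrpm. */
        uint8_t const iBit = uMsrpmBit + (fWrite ? 1 : 0);
        return (pbNstGstMsrpm[offMsrpm] >> iBit) & 1;
    }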