Timestamp: Aug 4, 2017 6:05:13 AM
Location: trunk
Files: 4 edited
Legend: unmodified lines carry no prefix; added lines are prefixed with "+", removed lines with "-".
trunk/include/VBox/vmm/hm.h
r68226 → r68293

 VMM_INT_DECL(TRPMEVENT)    HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);
 VMM_INT_DECL(int)          HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint32_t *puMsrpmBit);
-/** @} */
-
-/** @name Nested hardware virtualization.
- * @{
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb);
-VMM_INT_DECL(uint8_t)      HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx);
-VMM_INT_DECL(bool)         HMSvmNstGstCanTakePhysInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(bool)         HMSvmNstGstCanTakeVirtInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode,
-                                                          uint64_t uExitInfo1, uint64_t uExitInfo2);
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleMsrIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t idMsr, bool fWrite);
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleIOIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PCSVMIOIOEXITINFO pIoExitInfo,
-                                                        uint64_t uNextRip);
 VMM_INT_DECL(bool)         HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                                     uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                                     PSVMIOIOEXITINFO pIoExitInfo);
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
+/** @} */
+
+/** @name Nested hardware virtualization.
+ * @{
+ */
+#ifdef VBOX_WITH_NESTED_HWVIRT
 VMM_INT_DECL(void)         HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst);
+#endif
 /** @} */
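After this reshuffle, HMSvmVmmcall and HMSvmIsIOInterceptActive remain available unconditionally, while the one declaration left in the nested hardware virtualization group, HMSvmNstGstVmExitNotify, is compiled out when VBOX_WITH_NESTED_HWVIRT is not defined. A minimal, self-contained sketch of how a call site can mirror that compile-time guard; the wrapper function and the stub typedefs are hypothetical, not part of the header:

    /* Stand-in types so this sketch compiles on its own; the real
       PVMCPU/PSVMVMCB come from the VMM headers. */
    typedef struct VMCPU   *PVMCPU;
    typedef struct SVMVMCB *PSVMVMCB;

    #ifdef VBOX_WITH_NESTED_HWVIRT
    void HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst);
    #endif

    /* Hypothetical common-code helper: mirrors the header's #ifdef so that
       builds configured without nested hw.virt. still compile and link. */
    static void vmexitNotifyIfSupported(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
    {
    #ifdef VBOX_WITH_NESTED_HWVIRT
        HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
    #else
        (void)pVCpu;
        (void)pVmcbNstGst;
    #endif
    }

Keeping the guard at the caller avoids unresolved references in builds that leave nested hardware virtualization out.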
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r68275 → r68293

+#ifdef VBOX_WITH_NESTED_HWVIRT
 /**
  * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
…
     pNstGstVmcbCache->fVmrunEmulatedInR0 = false;
 }
+#endif
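The body elided by the changeset view restores the nested-guest VMCB control fields that ring-0 had swapped out (see hmR0SvmVmRunCacheVmcb in HMSVMR0.cpp below) and invalidates the cache; only the last two statements are visible above. A rough, self-contained sketch of that restore-and-invalidate idea, using trimmed-down stand-in structures; exactly which fields the real callback restores is an assumption here:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, trimmed-down stand-ins for the VMCB control area and the
       ring-0 VMCB cache; the real structures live in the SVM headers and
       HMInternal.h. */
    typedef struct { uint64_t u64IOPMPhysAddr, u64MSRPMPhysAddr; } VMCBCTRLMINI;
    typedef struct { uint64_t u64IOPMPhysAddr, u64MSRPMPhysAddr;
                     bool fValid, fVmrunEmulatedInR0; } VMCBCACHEMINI;

    /* Sketch of the notification's job: put back what ring-0 swapped in
       before the nested-guest ran, then invalidate the cache so the next
       VMRUN re-populates it. */
    static void nstGstVmExitNotifySketch(VMCBCTRLMINI *pCtrl, VMCBCACHEMINI *pCache)
    {
        if (pCache->fValid)
        {
            pCtrl->u64IOPMPhysAddr  = pCache->u64IOPMPhysAddr;   /* assumption */
            pCtrl->u64MSRPMPhysAddr = pCache->u64MSRPMPhysAddr;  /* assumption */
            pCache->fValid = false;                              /* assumption */
        }
        pCache->fVmrunEmulatedInR0 = false;  /* the statement visible above */
    }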
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r68279 → r68293

 /** Physical address of the IO bitmap. */
 RTHCPHYS                   g_HCPhysIOBitmap  = 0;
-/** Virtual address of the IO bitmap. */
+/** Pointer to the IO bitmap. */
 R0PTRTYPE(void *)          g_pvIOBitmap      = NULL;
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** Ring-0 memory object for the nested-guest MSRPM bitmap. */
+RTR0MEMOBJ                 g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
+/** Physical address of the nested-guest MSRPM bitmap. */
+RTHCPHYS                   g_HCPhysNstGstMsrBitmap  = 0;
+/** Pointer to the nested-guest MSRPM bitmap. */
+R0PTRTYPE(void *)          g_pvNstGstMsrBitmap      = NULL;
+#endif
…
     /* Set all bits to intercept all IO accesses. */
     ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /*
+     * Allocate 8 KB for the MSR permission bitmap for the nested-guest.
+     */
+    Assert(g_hMemObjNstGstMsrBitmap == NIL_RTR0MEMOBJ);
+    rc = RTR0MemObjAllocCont(&g_hMemObjNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    g_pvNstGstMsrBitmap     = RTR0MemObjAddress(g_hMemObjNstGstMsrBitmap);
+    g_HCPhysNstGstMsrBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjNstGstMsrBitmap, 0 /* iPage */);
+
+    /* Set all bits to intercept all MSR accesses. */
+    ASMMemFill32(g_pvNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
+#endif
+
     return VINF_SUCCESS;
 }
…
         g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
     }
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (g_hMemObjNstGstMsrBitmap != NIL_RTR0MEMOBJ)
+    {
+        RTR0MemObjFree(g_hMemObjNstGstMsrBitmap, true /* fFreeMappings */);
+        g_pvNstGstMsrBitmap      = NULL;
+        g_HCPhysNstGstMsrBitmap  = 0;
+        g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
+    }
+#endif
 }
…
                                    | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
                                    | HM_CHANGED_GUEST_LAZY_MSRS     /* Unused. */
+                                   | HM_CHANGED_SVM_NESTED_GUEST
                                    | HM_CHANGED_SVM_RESERVED1       /* Reserved. */
                                    | HM_CHANGED_SVM_RESERVED2
-                                   | HM_CHANGED_SVM_RESERVED3
-                                   | HM_CHANGED_SVM_RESERVED4);
+                                   | HM_CHANGED_SVM_RESERVED3);
 
     /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
…
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Caches the nested-guest VMCB fields before we modify them for executing the
+ * nested-guest under SVM R0.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ *
+ * @sa      HMSvmNstGstVmExitNotify.
+ */
+static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+
+    pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
+    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
+    pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
+    pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
+    pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
+    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
+    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
+    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+    pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
+    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
+    pNstGstVmcbCache->fValid            = true;
+}
+
+
+/**
+ * Sets up the nested-guest for hardware-assisted SVM execution.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_NESTED_GUEST))
+    {
+        /*
+         * Cache the nested-guest VMCB fields before we start modifying them below.
+         */
+        hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
+
+        /*
+         * The IOPM of the nested-guest can be ignored because the guest always
+         * intercepts all IO port accesses. Thus, we swap to the guest IOPM rather
+         * than the nested-guest's IOPM, and swap it back on the #VMEXIT.
+         */
+        PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
+
+        /*
+         * Load the host-physical address into the MSRPM rather than the nested-guest
+         * physical address.
+         */
+        pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
+
+        /*
+         * Merge the guest exception intercepts into the nested-guest ones.
+         */
+        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+        hmR0SvmMergeIntercepts(pVCpu, pVmcb, pVmcbNstGst);
+
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_NESTED_GUEST);
+    }
+}
+
+
+/**
  * Loads the nested-guest state into the VMCB.
…
     if (!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0)
     {
+        hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
         hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
         hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
…
                                    | HM_CHANGED_SVM_RESERVED1       /* Reserved. */
                                    | HM_CHANGED_SVM_RESERVED2
-                                   | HM_CHANGED_SVM_RESERVED3
-                                   | HM_CHANGED_SVM_RESERVED4);
+                                   | HM_CHANGED_SVM_RESERVED3);
 
     /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
…
     if (RT_SUCCESS(rc))
     {
-        void const *pvMsrBitmap  = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-        bool const  fInterceptRd = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
-        bool const  fInterceptWr = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
-
-        if (   (pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE && fInterceptWr)
-            || (pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ  && fInterceptRd))
+        void const *pvMsrBitmap     = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+        bool const  fInterceptRead  = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
+        bool const  fInterceptWrite = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
+
+        if (   (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
+            || (fInterceptRead  && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
         {
             return hmR0SvmExecVmexit(pVCpu, pCtx);
…
 #if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
 /**
- * Merges the guest MSR permission bitmap into the nested-guest MSR permission
- * bitmap.
- *
- * @param   pVCpu               The cross context virtual CPU structure.
- * @param   pvMsrBitmap         Pointer to the guest MSRPM bitmap.
- * @param   pvNstGstMsrBitmap   Pointer to the nested-guest MSRPM bitmap.
- */
-static void hmR0SvmMergeMsrpmBitmap(PVMCPU pVCpu, const void *pvMsrBitmap, void *pvNstGstMsrBitmap)
-{
-    RT_NOREF(pVCpu);
-    uint64_t const *puChunk       = (uint64_t *)pvMsrBitmap;
-    uint64_t       *puNstGstChunk = (uint64_t *)pvNstGstMsrBitmap;
-    uint32_t const  cbChunks      = SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT;
-    uint32_t const  cChunks       = cbChunks / sizeof(*puChunk);
-    Assert(cbChunks % sizeof(*puChunk) == 0);
-
-    for (uint32_t idxChunk = 0, offChunk = 0;
-         idxChunk < cChunks;
-         idxChunk++, offChunk += sizeof(*puChunk))
-    {
-        /* Leave reserved offsets (1800h+) untouched (as all bits set, see SVMR0InitVM). */
-        if (offChunk >= 0x1800)
-            break;
-        puNstGstChunk[idxChunk] |= puChunk[idxChunk];
-    }
-}
-
-
-/**
  * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and HM internals.
…
 /**
- * Performs a \#VMEXIT that happens during VMRUN emulation in hmR0SvmExecVmrun.
+ * Performs a \#VMEXIT when the VMRUN was emulated using hmR0SvmExecVmrun and
+ * the nested-guest was then optionally executed using hardware-assisted SVM.
  *
  * @returns VBox status code.
…
     Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
 
+    /*
+     * Make sure that if the VMRUN happens outside this SVM R0 code, we don't
+     * skip setting up what is required for executing the nested-guest using
+     * hardware-assisted SVM.
+     */
+    pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0 = false;
+
     if (RT_SUCCESS(rc))
     {
…
         Log(("hmR0SvmExecVmexit: Failed to write guest-VMCB at %#RGp\n", GCPhysVmcb));
         return rc;
-}
-
-
-/**
- * Caches the nested-guest VMCB fields before we modify them for executing the
- * nested-guest under SVM R0.
- *
- * @param   pCtx    Pointer to the guest-CPU context.
- */
-static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-
-    pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
-    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
-    pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
-    pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
-    pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
-    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
-    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
-    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
-    pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
-    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
-    pNstGstVmcbCache->fValid            = true;
 }
…
     /*
-     * Cache the nested-guest VMCB fields before we start modifying them below.
+     * Set up the nested-guest for executing it using hardware-assisted SVM.
      */
-    hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
-
-    /*
-     * The IOPM of the nested-guest can be ignored because the guest always
-     * intercepts all IO port accesses. Thus, we swap to the guest IOPM rather
-     * than the nested-guest's IOPM, and swap it back on the #VMEXIT.
-     */
-    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
-
-    /*
-     * Load the host-physical address into the MSRPM rather than the nested-guest
-     * physical address.
-     */
-    pVmcbNstGstCtrl->u64MSRPMPhysAddr = HCPhysNstGstMsrpm;
-
-    /*
-     * Merge the guest MSR permission bitmap into the nested-guest one.
-     *
-     * Note the assumption here is that our MSRPM is set up only once in SVMR0SetupVM.
-     * In hmR0SvmPreRunGuestCommittedNested we directly update the nested-guest one.
-     * Hence it can be done once here during VMRUN.
-     */
-    hmR0SvmMergeMsrpmBitmap(pVCpu, pVCpu->hm.s.svm.pvMsrBitmap, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
-
-    /*
-     * Merge the guest exception intercepts into the nested-guest ones.
-     */
-    {
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-        hmR0SvmMergeIntercepts(pVCpu, pVmcb, pVmcbNstGst);
-    }
+    hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
 
     /*
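The fInterceptRead/fInterceptWrite test in the MSR hunk above relies on the AMD SVM MSR permission map layout: each covered MSR owns two consecutive bits, the even bit gating reads and the odd bit gating writes, and HMSvmGetMsrpmOffsetAndBit (declared in hm.h above) translates an MSR index into the byte offset plus bit position used here. A self-contained sketch of the same lookup, with a plain-C bit test standing in for ASMBitTest and an illustrative function name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Each MSR maps to a bit pair in the 8 KB MSR permission bitmap: bit 2n
       intercepts reads, bit 2n+1 intercepts writes. offMsrpm is a byte
       offset, so (offMsrpm << 3) converts it to a bit offset first. */
    static bool svmIsMsrAccessIntercepted(uint8_t const *pbMsrpm, uint16_t offMsrpm,
                                          uint32_t uMsrpmBit, bool fWrite)
    {
        uint32_t const iBit = ((uint32_t)offMsrpm << 3) + uMsrpmBit + (fWrite ? 1 : 0);
        return (pbMsrpm[iBit >> 3] >> (iBit & 7)) & 1;
    }

Passing fWrite as true tests the odd bit of the pair, matching the "+ 1" in the ring-0 code; this is also why filling the bitmap with 0xffffffff, as the allocation hunk above does, intercepts every read and write.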
trunk/src/VBox/VMM/include/HMInternal.h
r68226 → r68293

 #define HM_CHANGED_SVM_RESERVED2        RT_BIT(21)
 #define HM_CHANGED_SVM_RESERVED3        RT_BIT(22)
-#define HM_CHANGED_SVM_RESERVED4        RT_BIT(23)
+#define HM_CHANGED_SVM_NESTED_GUEST     RT_BIT(23)
 
 #define HM_CHANGED_ALL_GUEST            (  HM_CHANGED_GUEST_CR0 \
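Repurposing the reserved bit gives SVM a dedicated dirty flag for nested-guest VMCB state: producers set it whenever the nested-guest VMCB may have changed, and hmR0SvmLoadGuestVmcbNested (added above) does its cache-and-merge work only while the flag is pending, clearing it afterwards. A stripped-down sketch of that changed-flag pattern; the helper functions and the global flag word are stand-ins for the per-VCPU HMCPU_CF_* machinery in HMInternal.h:

    #include <stdbool.h>
    #include <stdint.h>

    #define HM_CHANGED_SVM_NESTED_GUEST  (UINT32_C(1) << 23)

    static uint32_t g_fContextUseFlags;  /* per-VCPU in the real code */

    static bool hmIsPending(uint32_t fFlag) { return (g_fContextUseFlags & fFlag) != 0; }
    static void hmSet(uint32_t fFlag)       { g_fContextUseFlags |= fFlag; }
    static void hmClear(uint32_t fFlag)     { g_fContextUseFlags &= ~fFlag; }

    /* Producer side: emulation touched the nested-guest VMCB, mark it dirty. */
    static void emulateVmrunSketch(void)
    {
        hmSet(HM_CHANGED_SVM_NESTED_GUEST);
    }

    /* Consumer side: re-cache VMCB fields, swap the IOPM/MSRPM addresses and
       merge intercepts only while the flag is pending, then clear it. */
    static void loadNestedVmcbSketch(void)
    {
        if (hmIsPending(HM_CHANGED_SVM_NESTED_GUEST))
        {
            /* ... cache, swap bitmaps, merge intercepts ... */
            hmClear(HM_CHANGED_SVM_NESTED_GUEST);
        }
    }

This keeps the expensive setup off the hot path: repeated nested-guest world switches skip it entirely until something actually invalidates the cached state.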