Changeset 91289 in vbox for trunk/src/VBox/VMM
Timestamp: Sep 16, 2021 9:49:50 PM (3 years ago)
Location:  trunk/src/VBox/VMM
Files:     5 edited

trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r91287 → r91289)

@@ -575,7 +575,7 @@
          * Copy the MSR permission bitmap into the cache.
          */
-        Assert(pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap));
-        rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
-                                     SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
+        AssertCompile(sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap) == SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
+        rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap, GCPhysMsrBitmap,
+                                     sizeof(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap));
         if (RT_FAILURE(rc))
         {

@@ -1050,7 +1050,5 @@
          * Check if the bit is set, if so, trigger a #VMEXIT.
          */
-        uint8_t *pbMsrpm = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-        pbMsrpm += offMsrpm;
-        if (*pbMsrpm & RT_BIT(uMsrpmBit))
+        if (pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit))
         {
             IEM_SVM_UPDATE_NRIP(pVCpu);

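For context on the lookup in the second hunk: offMsrpm and uMsrpmBit address the MSR permission bitmap as laid out by the AMD architecture (three 2 KB vectors, two bits per MSR, even bit = read intercept, odd bit = write intercept). The following is a minimal, hypothetical sketch of that mapping, not the helper VirtualBox actually uses:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative sketch: map an MSR index to a byte offset and an even bit
       position inside the 8 KB SVM MSR permission bitmap, following the layout
       documented in the AMD64 Architecture Programmer's Manual. */
    static bool svmGetMsrpmOffsetAndBit(uint32_t idMsr, uint32_t *poffMsrpm, uint8_t *puMsrpmBit)
    {
        uint32_t uBase;      /* Byte offset of the 2 KB vector covering the range. */
        uint32_t uFirstMsr;  /* First MSR of the range. */

        if (idMsr <= UINT32_C(0x00001fff))
        {
            uBase     = 0x0000;
            uFirstMsr = UINT32_C(0x00000000);
        }
        else if (idMsr >= UINT32_C(0xc0000000) && idMsr <= UINT32_C(0xc0001fff))
        {
            uBase     = 0x0800;
            uFirstMsr = UINT32_C(0xc0000000);
        }
        else if (idMsr >= UINT32_C(0xc0010000) && idMsr <= UINT32_C(0xc0011fff))
        {
            uBase     = 0x1000;
            uFirstMsr = UINT32_C(0xc0010000);
        }
        else
            return false;   /* MSRs outside the bitmap are always intercepted. */

        uint32_t const iBit = (idMsr - uFirstMsr) * 2;   /* Two bits per MSR. */
        *poffMsrpm  = uBase + iBit / 8;
        *puMsrpmBit = (uint8_t)(iBit & 7);               /* Even bit; +1 is the write bit. */
        return true;
    }

With such a mapping, the intercept check reduces to testing abMsrBitmap[offMsrpm] against RT_BIT(uMsrpmBit), exactly as the rewritten hunk does.
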
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r91288 → r91289)

@@ -945,7 +945,5 @@
             {
                 /* Only clear the bit if the nested-guest is also not intercepting the MSR read.*/
-                uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-                pbNstGstMsrBitmap += offMsrpm;
-                if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit)))
+                if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit)))
                     *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
                 else

@@ -965,7 +963,5 @@
             {
                 /* Only clear the bit if the nested-guest is also not intercepting the MSR write.*/
-                uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-                pbNstGstMsrBitmap += offMsrpm;
-                if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit + 1)))
+                if (!(pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm] & RT_BIT(uMsrpmBit + 1)))
                     *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
                 else

@@ -2510,4 +2506,4 @@
     {
         uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hmr0.s.svm.pvMsrBitmap;
-        uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+        uint64_t const *pu64NstGstMsrpm = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[0];
         uint64_t       *pu64DstMsrpm    = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;

@@ -4939,6 +4935,5 @@
     Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);

-    uint8_t const *pbMsrBitmap = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-    pbMsrBitmap += offMsrpm;
+    uint8_t const * const pbMsrBitmap = &pVCpu->cpum.GstCtx.hwvirt.svm.abMsrBitmap[offMsrpm];
     bool const fInterceptRead  = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit));
     bool const fInterceptWrite = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));

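The hunk at line 2506 only changes the source pointer used when combining the outer guest's MSR bitmap with the nested-guest's cached bitmap before running the nested-guest. As a rough sketch of what such a merge amounts to (a simplified stand-in assuming an 8 KB bitmap and OR semantics where either party's intercept wins, not the actual VirtualBox loop):

    #include <stddef.h>
    #include <stdint.h>

    /* Local values mirroring the constants referenced in the diff (2 pages of 4 KB = 8 KB). */
    #define SVM_MSRPM_PAGES     2
    #define X86_PAGE_4K_SIZE    4096

    /* Merge two MSR permission bitmaps 64 bits at a time: a bit set by either
       the outer guest or the nested-guest stays set in the result. */
    static void svmMergeMsrpm(uint64_t *pu64DstMsrpm, const uint64_t *pu64GstMsrpm,
                              const uint64_t *pu64NstGstMsrpm)
    {
        size_t const cQWords = (SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE) / sizeof(uint64_t);
        for (size_t i = 0; i < cQWords; i++)
            pu64DstMsrpm[i] = pu64GstMsrpm[i] | pu64NstGstMsrpm[i];
    }
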
trunk/src/VBox/VMM/VMMR3/CPUM.cpp (r91287 → r91289)

@@ -1022,10 +1022,4 @@
         PVMCPU pVCpu = pVM->apCpusR3[i];

-        if (pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3)
-        {
-            SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3, SVM_MSRPM_PAGES);
-            pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3 = NULL;
-        }
-
         if (pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3)
         {

@@ -1055,23 +1049,6 @@
         pVCpu->cpum.s.Guest.hwvirt.enmHwvirt = CPUMHWVIRT_SVM;

-        AssertCompile(SVM_VMCB_PAGES * PAGE_SIZE == sizeof(pVCpu->cpum.s.Guest.hwvirt.svm.Vmcb));
-
-        /*
-         * Allocate the MSRPM (MSR Permission bitmap).
-         *
-         * This need not be physically contiguous pages because we use the one from
-         * HMPHYSCPU while executing the nested-guest using hardware-assisted SVM.
-         * This one is just used for caching the bitmap from guest physical memory.
-         */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3);
-        rc = SUPR3PageAllocEx(SVM_MSRPM_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3);
-            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's MSR permission bitmap\n", pVCpu->idCpu,
-                    SVM_MSRPM_PAGES));
-            break;
-        }
+        AssertCompile(SVM_VMCB_PAGES * X86_PAGE_SIZE == sizeof(pVCpu->cpum.s.Guest.hwvirt.svm.Vmcb));
+        AssertCompile(SVM_MSRPM_PAGES * X86_PAGE_SIZE == sizeof(pVCpu->cpum.s.Guest.hwvirt.svm.abMsrBitmap));

         /*

@@ -2652,6 +2629,6 @@
         SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState), 0 /* fFlags */,
                          g_aSvmHwvirtHostState, NULL /* pvUser */);
-        SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.Vmcb, sizeof(pGstCtx->hwvirt.svm.Vmcb));
-        SSMR3PutMem(pSSM, pGstCtx->hwvirt.svm.pvMsrBitmapR3, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
+        SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.Vmcb, sizeof(pGstCtx->hwvirt.svm.Vmcb));
+        SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
         SSMR3PutMem(pSSM, pGstCtx->hwvirt.svm.pvIoBitmapR3, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT);
         SSMR3PutU32(pSSM, pGstCtx->hwvirt.fLocalForcedActions);

@@ -2939,5 +2916,5 @@
                                 0 /* fFlags */, g_aSvmHwvirtHostState, NULL /* pvUser */);
             SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.Vmcb, sizeof(pGstCtx->hwvirt.svm.Vmcb));
-            SSMR3GetMem(pSSM, pGstCtx->hwvirt.svm.pvMsrBitmapR3, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
+            SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
             SSMR3GetMem(pSSM, pGstCtx->hwvirt.svm.pvIoBitmapR3, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT);
             SSMR3GetU32(pSSM, &pGstCtx->hwvirt.fLocalForcedActions);

@@ -4157,6 +4134,4 @@
         pHlp->pfnPrintf(pHlp, "    cPauseFilterThreshold = %RU32\n", pCtx->hwvirt.svm.cPauseFilterThreshold);
         pHlp->pfnPrintf(pHlp, "    fInterceptEvents      = %u\n",    pCtx->hwvirt.svm.fInterceptEvents);
-        pHlp->pfnPrintf(pHlp, "    pvMsrBitmapR3         = %p\n",    pCtx->hwvirt.svm.pvMsrBitmapR3);
-        pHlp->pfnPrintf(pHlp, "    pvMsrBitmapR0         = %RKv\n",  pCtx->hwvirt.svm.pvMsrBitmapR0);
         pHlp->pfnPrintf(pHlp, "    pvIoBitmapR3          = %p\n",    pCtx->hwvirt.svm.pvIoBitmapR3);
         pHlp->pfnPrintf(pHlp, "    pvIoBitmapR0          = %RKv\n",  pCtx->hwvirt.svm.pvIoBitmapR0);

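The CPUM.cpp changes follow from the bitmap now being an array embedded directly in the guest hardware-virtualization state rather than pages allocated with SUPR3PageAllocEx: the size becomes a compile-time property, the allocation and free paths disappear, and the saved-state code can simply use sizeof on the member. A trimmed-down, hypothetical illustration of that idea (names and layout simplified, not the real CPUMCTX definition):

    #include <stdint.h>

    #define X86_PAGE_SIZE    4096
    #define SVM_VMCB_PAGES   1
    #define SVM_MSRPM_PAGES  2

    /* Illustrative stand-in for the relevant part of the guest
       hardware-virtualization state; the real layout is more involved. */
    typedef struct SVMNESTEDSTATEEXAMPLE
    {
        uint8_t Vmcb[SVM_VMCB_PAGES * X86_PAGE_SIZE];           /* VMCB cache. */
        uint8_t abMsrBitmap[SVM_MSRPM_PAGES * X86_PAGE_SIZE];   /* MSR permission bitmap cache. */
    } SVMNESTEDSTATEEXAMPLE;

    /* C11 equivalent of the AssertCompile() checks added above: the size is
       verified at build time, so no runtime allocation or failure path remains. */
    _Static_assert(sizeof(((SVMNESTEDSTATEEXAMPLE *)0)->abMsrBitmap) == SVM_MSRPM_PAGES * X86_PAGE_SIZE,
                   "MSR permission bitmap cache must span exactly SVM_MSRPM_PAGES pages");

This also explains the comment removed from the allocation path: the cached bitmap never needed physically contiguous pages, since hardware-assisted execution uses the per-host-CPU bitmap, so a plain in-structure array is sufficient.
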
trunk/src/VBox/VMM/include/CPUMInternal.mac (r91287 → r91289)

@@ -231,5 +231,6 @@

     alignb 4096
     .Guest.hwvirt.svm.Vmcb                    resb        4096
+    .Guest.hwvirt.svm.abMsrBitmap             resb        8192
     .Guest.hwvirt.svm.uMsrHSavePa             resq        1
     .Guest.hwvirt.svm.GCPhysVmcb              resq        1

@@ -240,8 +241,4 @@
     .Guest.hwvirt.svm.cPauseFilterThreshold   resw        1
     .Guest.hwvirt.svm.fInterceptEvents        resb        1
-    alignb 8
-    .Guest.hwvirt.svm.pvMsrBitmapR0           RTR0PTR_RES 1
-    alignb 8
-    .Guest.hwvirt.svm.pvMsrBitmapR3           RTR3PTR_RES 1
     alignb 8
     .Guest.hwvirt.svm.pvIoBitmapR0            RTR0PTR_RES 1

trunk/src/VBox/VMM/testcase/tstVMStruct.h (r91287 → r91289)

@@ -129,4 +129,5 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.Vmcb);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.abMsrBitmap);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.GCPhysVmcb);

@@ -136,6 +137,4 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fInterceptEvents);
-    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0);
-    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR0);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR3);

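tstVMStruct.h feeds the structure-layout testcase, so the new abMsrBitmap member gets an offset check to keep the C structure and the CPUMInternal.mac assembly layout in sync. A self-contained sketch of what such an offset check boils down to (EXAMPLECTX and GEN_CHECK_OFF_SIMPLE are illustrative stand-ins, not the real GEN_CHECK_OFF machinery):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative structure whose layout matches the 'resb' directives shown
       in the CPUMInternal.mac hunk above. */
    typedef struct EXAMPLECTX
    {
        unsigned char      Vmcb[4096];
        unsigned char      abMsrBitmap[8192];
        unsigned long long uMsrHSavePa;
    } EXAMPLECTX;

    /* Compare the C offset of a member against the offset the assembly layout expects. */
    #define GEN_CHECK_OFF_SIMPLE(a_Type, a_Member, a_offAsm) \
        do { \
            if (offsetof(a_Type, a_Member) != (a_offAsm)) \
                printf("offset mismatch: %s.%s is %zu, assembly expects %u\n", \
                       #a_Type, #a_Member, offsetof(a_Type, a_Member), (unsigned)(a_offAsm)); \
        } while (0)

    int main(void)
    {
        /* The expected offsets mimic what the .mac 'resb' layout would yield. */
        GEN_CHECK_OFF_SIMPLE(EXAMPLECTX, Vmcb,        0);
        GEN_CHECK_OFF_SIMPLE(EXAMPLECTX, abMsrBitmap, 4096);
        GEN_CHECK_OFF_SIMPLE(EXAMPLECTX, uMsrHSavePa, 4096 + 8192);
        return 0;
    }
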