Changeset 66040 in vbox for trunk/src/VBox/VMM
- Timestamp: Mar 10, 2017 4:18:12 PM
- svn:sync-xref-src-repo-rev: 113906
- Location: trunk/src/VBox/VMM
- Files: 5 edited
Legend: context lines are unprefixed, lines added in r66040 are prefixed with '+', removed lines with '-'.
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
(diff against r66015)

  *
  * @returns Strict VBox status code (i.e. informational status codes too).
- *
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
- * @param   pVmcb       The VMCB of the nested-guest.
- * @param   pHostState  The host-state save area in the guest.
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PSVMHOSTSTATE pHostState)
-{
-    Assert(pHostState);
-    Assert(pVmcb);
-
-    /*
-     * Save host state.
-     */
-    pHostState->es       = pCtx->es;
-    pHostState->cs       = pCtx->cs;
-    pHostState->ss       = pCtx->ss;
-    pHostState->ds       = pCtx->ds;
-    pHostState->gdtr     = pCtx->gdtr;
-    pHostState->idtr     = pCtx->idtr;
-    pHostState->uEferMsr = pCtx->msrEFER;
-    pHostState->uCr0     = pCtx->cr0;
-    pHostState->uCr3     = pCtx->cr3;
-    pHostState->uCr4     = pCtx->cr4;
-    pHostState->rflags   = pCtx->rflags;
-    pHostState->uRip     = pCtx->rip;
-    pHostState->uRsp     = pCtx->rsp;
-    pHostState->uRax     = pCtx->rax;
-
-    /*
-     * Load controls from VMCB.
-     */
-    pCtx->hwvirt.svm.u16InterceptRdCRx = pVmcb->ctrl.u16InterceptRdCRx;
-    pCtx->hwvirt.svm.u16InterceptWrCRx = pVmcb->ctrl.u16InterceptWrCRx;
-    pCtx->hwvirt.svm.u16InterceptRdDRx = pVmcb->ctrl.u16InterceptRdDRx;
-    pCtx->hwvirt.svm.u16InterceptWrDRx = pVmcb->ctrl.u16InterceptWrDRx;
-    pCtx->hwvirt.svm.u64InterceptCtrl  = pVmcb->ctrl.u64InterceptCtrl;
-    pCtx->hwvirt.svm.u32InterceptXcpt  = pVmcb->ctrl.u32InterceptXcpt;
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
-    {
-        Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-    if (!pVmcb->ctrl.TLBCtrl.n.u32ASID)
-    {
-        Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
-        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    /** @todo the rest. */
-
-    return VERR_NOT_IMPLEMENTED;
+ * @param   GCPhysVmcb  Guest physical address of the VMCB to run.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb)
+{
+    Assert(pVCpu);
+    Assert(pCtx);
+
+    /*
+     * Cache the physical address of the VMCB for #VMEXIT exceptions.
+     */
+    pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
+
+    SVMVMCB Vmcb;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    int rc = PGMPhysSimpleReadGCPhys(pVM, &Vmcb, GCPhysVmcb, X86_PAGE_4K_SIZE);
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Save host state.
+         */
+        PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
+        pHostState->es       = pCtx->es;
+        pHostState->cs       = pCtx->cs;
+        pHostState->ss       = pCtx->ss;
+        pHostState->ds       = pCtx->ds;
+        pHostState->gdtr     = pCtx->gdtr;
+        pHostState->idtr     = pCtx->idtr;
+        pHostState->uEferMsr = pCtx->msrEFER;
+        pHostState->uCr0     = pCtx->cr0;
+        pHostState->uCr3     = pCtx->cr3;
+        pHostState->uCr4     = pCtx->cr4;
+        pHostState->rflags   = pCtx->rflags;
+        pHostState->uRip     = pCtx->rip;
+        pHostState->uRsp     = pCtx->rsp;
+        pHostState->uRax     = pCtx->rax;
+
+        /*
+         * Cache the VMCB controls.
+         */
+        pCtx->hwvirt.svm.VmcbCtrl = Vmcb.ctrl;
+
+        /*
+         * Validate the VMCB controls.
+         */
+        if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
+        {
+            Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
+            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+        if (   pCtx->hwvirt.svm.VmcbCtrl.NestedPaging.n.u1NestedPaging
+            && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
+        {
+            Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
+            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+        if (!pCtx->hwvirt.svm.VmcbCtrl.TLBCtrl.n.u32ASID)
+        {
+            Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
+            return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        }
+
+        /** @todo the rest. */
+
+        return VERR_NOT_IMPLEMENTED;
+    }
+
+    return rc;
 }
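The notable shift above is that the VMCB is now read by guest physical address into a local copy and its control area is cached in CPUMCTX, so later checks go through accessors such as CPUMIsGuestSvmCtrlInterceptSet() instead of dereferencing a mapped VMCB. A minimal hedged sketch of what such a query against the cached controls amounts to (the helper name is hypothetical, not the actual CPUM implementation):

    /* Sketch only: test an intercept bit against the VMCB controls that
       HMSvmVmrun() copied into the guest context above. */
    DECLINLINE(bool) hmSvmIsCtrlInterceptSetSketch(PCCPUMCTX pCtx, uint64_t fIntercept)
    {
        return RT_BOOL(pCtx->hwvirt.svm.VmcbCtrl.u64InterceptCtrl & fIntercept);
    }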
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(diff against r66015)

 #endif

-    void *pvVmcb;
-    PGMPAGEMAPLOCK PgLockVmcb;
-    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
-    if (rcStrict == VINF_SUCCESS)
-    {
-        pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
-
-        RTGCPHYS GCPhysHostState = pCtx->hwvirt.svm.uMsrHSavePa;
-        /** @todo SVM does not validate the host-state area beyond checking the
-         *        alignment and range of the physical address.  Nothing to prevent users
-         *        from using MMIO or other weird stuff in which case anything might
-         *        happen. */
-        void *pvHostState;
-        PGMPAGEMAPLOCK PgLockHostState;
-        rcStrict = iemMemPageMap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, &pvHostState, &PgLockHostState);
-        if (rcStrict == VINF_SUCCESS)
-        {
-            PSVMHOSTSTATE pHostState = (PSVMHOSTSTATE)pvHostState;
-            PSVMVMCB      pVmcb      = (PSVMVMCB)pvVmcb;
-            rcStrict = HMSvmVmrun(pVCpu, pCtx, pVmcb, pHostState);
-
-            iemMemPageUnmap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, pvHostState, &PgLockHostState);
-        }
-        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
-    }
+    VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
     RT_NOREF(cbInstr);
     return rcStrict;
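With the page-mapping code gone, VMRUN emulation simply hands the guest physical VMCB address to HMSvmVmrun(), which copies the page via PGMPhysSimpleReadGCPhys(). The removed @todo points out that hardware only validates alignment and range of such physical addresses; a hedged caller-side sketch of that kind of check (the exact placement and fault raised are assumptions, not necessarily what iemCImpl_vmrun does):

    /* Sketch: reject a VMCB address that is not 4K page aligned before
       emulating VMRUN any further. */
    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    {
        Log(("vmrun: VMCB physical address (%#RGp) not page aligned -> #GP(0)\n", GCPhysVmcb));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }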
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
(diff against r65905)

     pFeatures->fXop = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
     pFeatures->fSvm = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
+    if (pFeatures->fSvm)
+    {
+        PCCPUMCPUIDLEAF pSvmLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x8000000a);
+        AssertLogRelReturn(pSvmLeaf, VERR_CPUM_IPE_1);
+        pFeatures->svm.feat.u   = pSvmLeaf->uEdx;
+        pFeatures->svm.uMaxAsid = pSvmLeaf->uEbx;
+    }
 }
…
     pSvmFeatureLeaf->uEbx = 0x8000;   /** @todo figure out virtual NASID. */
     pSvmFeatureLeaf->uEcx = 0;
-    pSvmFeatureLeaf->uEdx = 0;        /** @todo Support SVM features */
+    pSvmFeatureLeaf->uEdx = 0;        /** @todo Support SVM features */
 }
 else
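The new block caches CPUID leaf 0x8000000a, where EBX carries the number of ASIDs (NASID) and EDX the SVM feature flags, so consumers can test features without re-reading CPUID. A short hedged usage sketch built from the field names in the hunks above:

    /* Sketch: query the cached SVM features; fNestedPaging is the bit the
       HMSvmVmrun() validation above relies on. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (!pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
        Log(("Nested paging is not exposed to the guest\n"));
    Assert(pVM->cpum.ro.GuestFeatures.svm.uMaxAsid >= 1);  /* at least one guest ASID expected */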
trunk/src/VBox/VMM/include/CPUMInternal.mac
(diff against r66000)

     .Guest.abPadding                    resb    12
 %endif
-    .Guest.hwvirt.svm.uMsrHSavePa       resq    1
-    .Guest.hwvirt.svm.u64InterceptCtrl  resq    1
-    .Guest.hwvirt.svm.u32InterceptXcpt  resd    1
-    .Guest.hwvirt.svm.u16InterceptRdCRx resw    1
-    .Guest.hwvirt.svm.u16InterceptWrCRx resw    1
-    .Guest.hwvirt.svm.u16InterceptRdDRx resw    1
-    .Guest.hwvirt.svm.u16InterceptWrDRx resw    1
-    .Guest.hwvirt.svm.fGif              resb    1
-    .Guest.hwvirt.svm.abPadding         resb    3
-    .Guest.hwvirt.svm.GCPhysNstGstVmcb  resq    1
+    .Guest.hwvirt.svm.uMsrHSavePa       resq    1
+    .Guest.hwvirt.svm.GCPhysVmcb        resq    1
+    .Guest.hwvirt.svm.VmcbCtrl          resb    256
+    .Guest.hwvirt.svm.HostState         resb    184
+    .Guest.hwvirt.svm.fGif              resb    1
     alignb 64
…
     .Hyper.abPadding                    resb    12
 %endif
-    .Hyper.hwvirt.svm.uMsrHSavePa       resq    1
-    .Hyper.hwvirt.svm.u64InterceptCtrl  resq    1
-    .Hyper.hwvirt.svm.u32InterceptXcpt  resd    1
-    .Hyper.hwvirt.svm.u16InterceptRdCRx resw    1
-    .Hyper.hwvirt.svm.u16InterceptWrCRx resw    1
-    .Hyper.hwvirt.svm.u16InterceptRdDRx resw    1
-    .Hyper.hwvirt.svm.u16InterceptWrDRx resw    1
-    .Hyper.hwvirt.svm.fGif              resb    1
-    .Hyper.hwvirt.svm.abPadding         resb    3
-    .Hyper.hwvirt.svm.GCPhysNstGstVmcb  resq    1
+    .Hyper.hwvirt.svm.uMsrHSavePa       resq    1
+    .Hyper.hwvirt.svm.GCPhysVmcb        resq    1
+    .Hyper.hwvirt.svm.VmcbCtrl          resb    256
+    .Hyper.hwvirt.svm.HostState         resb    184
+    .Hyper.hwvirt.svm.fGif              resb    1
     alignb 64
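The assembly view of CPUMCTX now reserves 8 bytes for the VMCB address, 256 bytes for the cached control area and 184 bytes for the host-state save area; these reservations have to track the C structures. A hedged compile-time sketch of the kind of check that keeps them honest (the sizes are taken from the reservations above, the assert form is an assumption):

    /* Sketch: fail the build if the C structures drift from the NASM layout. */
    AssertCompile(sizeof(SVMVMCBCTRL)  == 256);
    AssertCompile(sizeof(SVMHOSTSTATE) == 184);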
trunk/src/VBox/VMM/testcase/tstVMStruct.h
(diff against r65904)

     GEN_CHECK_OFF(CPUMCTX, hwvirt);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.GCPhysVmcb);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.VmcbCtrl);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
     /** @todo add rest of hwvirt fields when code is more
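Each GEN_CHECK_OFF entry makes the generated structure testcase compare the member's offset across compilation contexts, so the three new hwvirt.svm members are covered as well. Roughly, and only as a hedged illustration of what is being guarded (the no-padding assumption between the two 64-bit fields is mine):

    /* Sketch: GCPhysVmcb is expected to directly follow uMsrHSavePa, as laid
       out in CPUMInternal.mac above. */
    #include <stddef.h>
    AssertCompile(offsetof(CPUMCTX, hwvirt.svm.GCPhysVmcb)
                  == offsetof(CPUMCTX, hwvirt.svm.uMsrHSavePa) + sizeof(uint64_t));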