Changeset 72848 in vbox
- Timestamp: Jul 4, 2018 5:01:54 AM (7 years ago)
- svn:sync-xref-src-repo-rev: 123393
- File: 1 edited
Legend:
- Unmodified: plain lines
- Added: lines prefixed with '+'
- Removed: lines prefixed with '-'
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72825)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72848)

@@ -170,6 +170,5 @@
 } while (0)

-/** Macro for saving segment registers from VMCS into the guest-CPU
- *  context. */
+/** Macro for importing segment registers to the VMCS from the guest-CPU context. */
 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
@@ -181,4 +180,9 @@
                                   VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
 #endif
+
+/** Macro for exporting segment registers to the VMCS from the guest-CPU context. */
+# define HMVMX_EXPORT_SREG(Sel, a_pCtxSelReg) \
+    hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
+                                 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))


@@ -3167,7 +3171,7 @@
 #endif

-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    uint64_t u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
-    uint64_t u64GuestEfer = pMixedCtx->msrEFER;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    uint64_t const u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
+    uint64_t const u64GuestEfer = pMixedCtx->msrEFER;

     /*

@@ -4216,5 +4220,5 @@
  *
  * The reason we check for attribute value 0 in this function and not just the unusable bit is
- * because hmR0VmxWriteSegmentReg() only updates the VMCS' copy of the value with the unusable bit
+ * because hmR0VmxExportGuestSegmentReg() only updates the VMCS' copy of the value with the unusable bit
  * and doesn't change the guest-context value.
  */

@@ -4262,5 +4266,5 @@
                || (pCtx->ss.Attr.n.u1Granularity));
     }
-    /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
+    /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
     if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
     {

@@ -4332,5 +4336,5 @@
     {
         /* Real and v86 mode checks. */
-        /* hmR0VmxWriteSegmentReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
+        /* hmR0VmxExportGuestSegmentReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
         uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)

@@ -4381,5 +4385,5 @@

 /**
- * Writes a guest segment register into the guest-state area in the VMCS.
+ * Exports a guest segment register into the guest-state area in the VMCS.
  *
  * @returns VBox status code.
@@ -4393,6 +4397,6 @@
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
-                                  uint32_t idxAccess, PCCPUMSELREG pSelReg)
+static int hmR0VmxExportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
+                                        PCCPUMSELREG pSelReg)
 {
     int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel);       /* 16-bit guest selector field. */

@@ -4456,15 +4460,4 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
     {
-        /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
-        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        {
-            pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
-            pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
-        }
-
 #ifdef VBOX_WITH_REM
         if (!pVM->hm.s.vmx.fUnrestrictedGuest)
@@ -4483,17 +4476,57 @@
         }
 #endif
-        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_CS_SEL, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
-                                    VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
-        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_SS_SEL, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
-                                     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
-        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_DS_SEL, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
-                                     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
-        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_ES_SEL, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
-                                     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
-        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FS_SEL, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
-                                     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
-        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_GS_SEL, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
-                                     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
-        AssertRCReturn(rc, rc);
+        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
+        {
+            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+                pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
+            rc = HMVMX_EXPORT_SREG(CS, &pMixedCtx->cs);
+            AssertRCReturn(rc, rc);
+            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
+        }
+
+        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
+        {
+            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+                pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
+            rc = HMVMX_EXPORT_SREG(SS, &pMixedCtx->ss);
+            AssertRCReturn(rc, rc);
+            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
+        }
+
+        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
+        {
+            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+                pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
+            rc = HMVMX_EXPORT_SREG(DS, &pMixedCtx->ds);
+            AssertRCReturn(rc, rc);
+            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
+        }
+
+        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
+        {
+            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+                pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
+            rc = HMVMX_EXPORT_SREG(ES, &pMixedCtx->es);
+            AssertRCReturn(rc, rc);
+            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
+        }
+
+        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
+        {
+            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+                pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
+            rc = HMVMX_EXPORT_SREG(FS, &pMixedCtx->fs);
+            AssertRCReturn(rc, rc);
+            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
+        }
+
+        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
+        {
+            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
+                pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
+            rc = HMVMX_EXPORT_SREG(GS, &pMixedCtx->gs);
+            AssertRCReturn(rc, rc);
+            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
+        }

 #ifdef VBOX_STRICT
@@ -4505,5 +4538,4 @@
         EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true);

-        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SREG_MASK);
         Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
                   pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));

@@ -8552,7 +8584,10 @@
 #endif
 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
-        uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
-        if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
-            Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
+        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
+        {
+            uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
+            if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
+                Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
+        }
 #endif
     }

@@ -10576,5 +10611,5 @@
     }

-    /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
+    /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmenReg(). */
     if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
     {

@@ -11745,8 +11780,10 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();

-    /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
+    /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. FS, GS (base) can be accessed by MSR reads. */
     int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
                                             | CPUMCTX_EXTRN_RFLAGS
-                                            | CPUMCTX_EXTRN_SS);
+                                            | CPUMCTX_EXTRN_SS
+                                            | CPUMCTX_EXTRN_FS
+                                            | CPUMCTX_EXTRN_GS);
     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
         rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);

@@ -11802,8 +11839,10 @@
     int rc = VINF_SUCCESS;

-    /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
+    /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. FS, GS (base) can be accessed by MSR writes. */
     rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
                                         | CPUMCTX_EXTRN_RFLAGS
-                                        | CPUMCTX_EXTRN_SS);
+                                        | CPUMCTX_EXTRN_SS
+                                        | CPUMCTX_EXTRN_FS
+                                        | CPUMCTX_EXTRN_GS);
     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
         rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
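The core of the change is visible in the segment-register export path: instead of rewriting all six segment registers whenever any bit of HM_CHANGED_GUEST_SREG_MASK is set, each register now carries its own changed-flag that is tested and cleared individually. The standalone sketch below illustrates that per-register dirty-flag pattern in isolation; the names (CHANGED_CS, exportSegReg(), exportDirtySegRegs()) are simplified illustrative stand-ins, not the VBox API.

/*
 * Minimal sketch of the per-register dirty-flag export pattern this
 * changeset introduces. Simplified, hypothetical names throughout.
 */
#include <stdint.h>
#include <stdio.h>

#define CHANGED_CS  UINT64_C(0x01)
#define CHANGED_SS  UINT64_C(0x02)
#define CHANGED_DS  UINT64_C(0x04)
#define CHANGED_ES  UINT64_C(0x08)
#define CHANGED_FS  UINT64_C(0x10)
#define CHANGED_GS  UINT64_C(0x20)

/* Stub standing in for the per-register VMCS write. */
static int exportSegReg(const char *pszName)
{
    printf("exporting %s\n", pszName);
    return 0; /* success */
}

/* Exports only the segment registers whose dirty bit is set, clearing each
   bit as that register is written -- the shape of the new code above. */
static int exportDirtySegRegs(uint64_t *pfCtxChanged)
{
    static const struct { uint64_t fFlag; const char *pszName; } s_aSRegs[] =
    {
        { CHANGED_CS, "CS" }, { CHANGED_SS, "SS" }, { CHANGED_DS, "DS" },
        { CHANGED_ES, "ES" }, { CHANGED_FS, "FS" }, { CHANGED_GS, "GS" },
    };
    for (unsigned i = 0; i < sizeof(s_aSRegs) / sizeof(s_aSRegs[0]); i++)
        if (*pfCtxChanged & s_aSRegs[i].fFlag)
        {
            int rc = exportSegReg(s_aSRegs[i].pszName);
            if (rc != 0)
                return rc;                       /* flag stays set on failure */
            *pfCtxChanged &= ~s_aSRegs[i].fFlag; /* clear only this register's bit */
        }
    return 0;
}

int main(void)
{
    uint64_t fCtxChanged = CHANGED_CS | CHANGED_FS; /* only CS and FS were touched */
    return exportDirtySegRegs(&fCtxChanged);        /* exports just those two */
}

Note the ordering mirrors the diff: each flag is cleared only after its register is successfully written, so a failed export leaves the flag set and a later pass retries just the registers that still need syncing, rather than clearing the whole HM_CHANGED_GUEST_SREG_MASK at the end as the old code did.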