Changeset 97406 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp:
    Nov 5, 2022 12:42:14 PM (2 years ago)
Location:
    trunk/src/VBox/VMM/VMMAll
Files:
    2 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
--- IEMAll.cpp (r97370)
+++ IEMAll.cpp (r97406)
@@ -10012 +10012 @@
         uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
         rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
+#ifdef VBOX_STRICT
+        CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
+#endif
         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
         {
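The added block wires a consistency check into the interpreter right after the one-byte opcode dispatch, and it is compiled in only when VBOX_STRICT is defined, so release builds pay nothing for it. The snippet below is a self-contained sketch of that compile-time-gated invariant pattern; MY_STRICT, GUESTCTX and checkRFlagsInvariant are made-up names standing in for VBOX_STRICT and CPUMAssertGuestRFlagsCookie, whose actual checks live in CPUM and are not shown in this changeset.

#include <assert.h>
#include <stdint.h>

#define MY_STRICT                               /* analogous to building with VBOX_STRICT */

typedef struct GUESTCTX { uint64_t rflags; } GUESTCTX;

static void checkRFlagsInvariant(const GUESTCTX *pCtx)
{
#ifdef MY_STRICT
    /* Bit 1 of EFLAGS/RFLAGS is architecturally reserved and always reads as 1,
       so a cleared bit 1 means the emulator corrupted its own flags bookkeeping. */
    assert(pCtx->rflags & UINT64_C(2));
#else
    (void)pCtx;                                 /* compiled out in non-strict builds */
#endif
}

int main(void)
{
    GUESTCTX Ctx = { UINT64_C(0x202) };         /* IF set, reserved bit 1 set */
    /* ...emulate one instruction... */
    checkRFlagsInvariant(&Ctx);                 /* re-check the invariant after every instruction */
    return 0;
}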
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
--- IEMAllCImpl.cpp (r97370)
+++ IEMAllCImpl.cpp (r97406)
@@ -4505 +4505 @@
 
 /**
+ * Completes a MOV SReg,XXX or POP SReg instruction.
+ *
+ * When not modifying SS or when we're already in an interrupt shadow we
+ * can update RIP and finish the instruction the normal way.
+ *
+ * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
+ * both TF and DBx events. The TF will be ignored while the DBx ones will
+ * be delayed till the next instruction boundrary. For more details see
+ * @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching Stacks}.
+ */
+DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
+{
+    if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
+        return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+
+    iemRegAddToRip(pVCpu, cbInstr);
+    pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
+    CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
  *
+ * @param   pVCpu   The cross context virtual CPU structure of the calling
+ *                  thread.
  * @param   iSegReg     The segment register number (valid).
  * @param   uSel        The new selector value.
  */
-IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
+static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
 {
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
@@ -4539 +4565 @@
                        : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
 #endif
-        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
-        return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
-    }
-
+    }
     /*
      * Protected mode.
@@ -4549 +4572 @@
      * FS and GS. If not null, then we have to load and parse the descriptor.
      */
-    if (!(uSel & X86_SEL_MASK_OFF_RPL))
+    else if (!(uSel & X86_SEL_MASK_OFF_RPL))
     {
         Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
@@ -4572 +4595 @@
         if (iSegReg == X86_SREG_SS)
             pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
-
-        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
-        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
-
-        return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
-    }
-
-    /* Fetch the descriptor. */
-    IEMSELDESC Desc;
-    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
-    if (rcStrict != VINF_SUCCESS)
-        return rcStrict;
-
-    /* Check GPs first. */
-    if (!Desc.Legacy.Gen.u1DescType)
-    {
-        Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-    }
-    if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
-    {
-        if (   (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
-            || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
-        {
-            Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
-            return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-        }
-        if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
-        {
-            Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
-            return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-        }
-        if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
-        {
-            Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
-            return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-        }
-    }
-    else
-    {
-        if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
-        {
-            Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
-            return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-        }
-        if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
-            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
-        {
-#if 0 /* this is what intel says. */
-            if (   (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
-                && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
-            {
-                Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
-                     iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-            }
-#else /* this is what makes more sense. */
-            if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
-            {
-                Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
-                     iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-            }
-            if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
-            {
-                Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
-                     iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
-            }
-#endif
-        }
-    }
-
-    /* Is it there? */
-    if (!Desc.Legacy.Gen.u1Present)
-    {
-        Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
-        return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
-    }
-
-    /* The base and limit. */
-    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
-    uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
-
-    /*
-     * Ok, everything checked out fine. Now set the accessed bit before
-     * committing the result into the registers.
-     */
-    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-    {
-        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
-    }
-
-    /* commit */
-    *pSel = uSel;
-    pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
-    pHid->u32Limit = cbLimit;
-    pHid->u64Base = u64Base;
-    pHid->ValidSel = uSel;
-    pHid->fFlags = CPUMSELREG_FLAGS_VALID;
-
-    /** @todo check if the hidden bits are loaded correctly for 64-bit
-     *        mode. */
+    }
+    else
+    {
+
+        /* Fetch the descriptor. */
+        IEMSELDESC Desc;
+        VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+
+        /* Check GPs first. */
+        if (!Desc.Legacy.Gen.u1DescType)
+        {
+            Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+        }
+        if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
+        {
+            if (   (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
+                || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
+            {
+                Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
+                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+            }
+            if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
+            {
+                Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
+                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+            }
+            if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
+            {
+                Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
+                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+            }
+        }
+        else
+        {
+            if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
+            {
+                Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
+                return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+            }
+            if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+            {
+#if 0 /* this is what intel says. */
+                if (   (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
+                    && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
+                {
+                    Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
+                         iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
+                    return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+                }
+#else /* this is what makes more sense. */
+                if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
+                {
+                    Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
+                         iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
+                    return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+                }
+                if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
+                {
+                    Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
+                         iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
+                    return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
+                }
+#endif
+            }
+        }
+
+        /* Is it there? */
+        if (!Desc.Legacy.Gen.u1Present)
+        {
+            Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
+            return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
+        }
+
+        /* The base and limit. */
+        uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
+        uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
+
+        /*
+         * Ok, everything checked out fine. Now set the accessed bit before
+         * committing the result into the registers.
+         */
+        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        /* commit */
+        *pSel = uSel;
+        pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
+        pHid->u32Limit = cbLimit;
+        pHid->u64Base = u64Base;
+        pHid->ValidSel = uSel;
+        pHid->fFlags = CPUMSELREG_FLAGS_VALID;
+
+        /** @todo check if the hidden bits are loaded correctly for 64-bit
+         *        mode. */
+    }
+
     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
-
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
-    return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
 }
 
@@ -4693 +4714 @@
 IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
 {
-    if (iSegReg != X86_SREG_SS)
-        return IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
-    /** @todo only set it the shadow flag if it was clear before? */
-    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
+    VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
     if (rcStrict == VINF_SUCCESS)
-        CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
+        rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
     return rcStrict;
 }
@@ -4725 +4743 @@
             rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
             if (rcStrict == VINF_SUCCESS)
-                rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
+                rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
             break;
         }
@@ -4734 +4752 @@
             rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
             if (rcStrict == VINF_SUCCESS)
-                rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
+                rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
             break;
         }
@@ -4743 +4761 @@
             rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
             if (rcStrict == VINF_SUCCESS)
-                rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
+                rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
             break;
         }
@@ -4749 +4767 @@
     }
 
-    /*
-     * Commit the stack on success and set interrupt shadow flag if appropriate
-     * (the latter must be done after updating RIP).
+    /*
+     * If the load succeeded, commit the stack change and finish the instruction.
      */
     if (rcStrict == VINF_SUCCESS)
     {
         pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
-        if (iSegReg == X86_SREG_SS)
-        {
-            /** @todo only set it the shadow flag if it was clear before? */
-            CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
-        }
-    }
+        rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
+    }
+
     return rcStrict;
 }
@@ -4772 +4786 @@
 {
     /*
-     * Use iemCImpl_LoadSReg to do the tricky segment register loading.
+     * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
      */
     /** @todo verify and test that mov, pop and lXs works the segment
      *        register loading in the exact same way. */
-    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
+    VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
     if (rcStrict == VINF_SUCCESS)
     {
@@ -4785 +4799 @@
                 break;
             case IEMMODE_32BIT:
-                *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
-                break;
             case IEMMODE_64BIT:
                 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
@@ -4792 +4804 @@
             IEM_NOT_REACHED_DEFAULT_CASE_RET();
         }
-    }
+        return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+    }
     return rcStrict;
 }
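Taken together, the IEMAllCImpl.cpp hunks split the old iemCImpl_LoadSReg into a static worker that only performs the selector load and a small finish helper that advances RIP and, for SS loads outside an existing interrupt shadow, arms the MOV/POP SS shadow (clearing only RF and deferring DRx events to the next instruction boundary). The MOV SReg,GReg and POP SReg paths now end with the same two-step sequence, condensed below from the hunks above; the lXs path cannot load SS and therefore finishes with the plain RIP advance instead.

    /* Call pattern introduced by this changeset, taken from the MOV and POP SReg
       hunks above; fault handling inside the worker is unchanged. */
    VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);   /* raw selector load, may raise #GP/#NP */
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);         /* RIP advance + SS interrupt shadow */
    return rcStrict;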