Changeset 86183 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Sep 20, 2020 11:58:23 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(r84478 → r86183) This file gains full implementations of iemCImpl_sysenter and iemCImpl_sysexit, inserted after the SYSRET code. The unchanged context is the @todo comment at the top and the final prefetch-buffer flush at the bottom; everything in between is new:

```c
    /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
     *        on sysret. */

    /* Flush the prefetch buffer. */
#ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.pbInstrBuf = NULL;
#else
    pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
#endif

    return VINF_SUCCESS;
}


/**
 * Implements SYSENTER (Intel, 32-bit AMD).
 */
IEM_CIMPL_DEF_0(iemCImpl_sysenter)
{
    RT_NOREF(cbInstr);

    /*
     * Check preconditions.
     *
     * Note that CPUs described in the documentation may load slightly different
     * values into CS and SS than we allow here.  This has yet to be checked on
     * real hardware.
     */
    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
    {
        Log(("sysenter: not supported -> #UD\n"));
        return iemRaiseUndefinedOpcode(pVCpu);
    }
    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
    {
        Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
    if (IEM_IS_GUEST_CPU_AMD(pVCpu) && !fIsLongMode)
    {
        Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
        return iemRaiseUndefinedOpcode(pVCpu);
    }
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
    uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
    if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
    {
        Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /* This test isn't in the docs, it's just a safeguard against missing
       canonical checks when writing the registers. */
    if (RT_LIKELY(   !fIsLongMode
                  || (   IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
                      && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
    { /* likely */ }
    else
    {
        Log(("sysenter: SYSENTER_EIP = %#RX64 or/and SYSENTER_ESP = %#RX64 not canonical -> #GP(0)\n",
             pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
        return iemRaiseUndefinedOpcode(pVCpu);
    }

    /** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */

    /*
     * Update registers and commit.
     */
    if (fIsLongMode)
    {
        Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
        pVCpu->cpum.GstCtx.rip       = pVCpu->cpum.GstCtx.SysEnter.eip;
        pVCpu->cpum.GstCtx.rsp       = pVCpu->cpum.GstCtx.SysEnter.esp;
        pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                     | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
    }
    else
    {
        Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
             pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
        pVCpu->cpum.GstCtx.rip       = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
        pVCpu->cpum.GstCtx.rsp       = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
        pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                     | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
    }
    pVCpu->cpum.GstCtx.cs.Sel      = uNewCs & X86_SEL_MASK_OFF_RPL;
    pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
    pVCpu->cpum.GstCtx.cs.u64Base  = 0;
    pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
    pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;

    pVCpu->cpum.GstCtx.ss.Sel      = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
    pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
    pVCpu->cpum.GstCtx.ss.u64Base  = 0;
    pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
    pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                   | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
    pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;

    pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
    pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
    pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;

    pVCpu->iem.s.uCpl = 0;

    /* Flush the prefetch buffer. */
#ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.pbInstrBuf = NULL;
#else
    pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
#endif

    return VINF_SUCCESS;
}


/**
 * Implements SYSEXIT (Intel, 32-bit AMD).
 *
 * @param   enmEffOpSize    The effective operand size.
 */
IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
{
    RT_NOREF(cbInstr);

    /*
     * Check preconditions.
     *
     * Note that CPUs described in the documentation may load slightly different
     * values into CS and SS than we allow here.  This has yet to be checked on
     * real hardware.
     */
    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
    {
        Log(("sysexit: not supported -> #UD\n"));
        return iemRaiseUndefinedOpcode(pVCpu);
    }
    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
    {
        Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
    if (IEM_IS_GUEST_CPU_AMD(pVCpu) && !fIsLongMode)
    {
        Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
        return iemRaiseUndefinedOpcode(pVCpu);
    }
    if (pVCpu->iem.s.uCpl != 0)
    {
        Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", pVCpu->iem.s.uCpl));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
    uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
    if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
    {
        Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /*
     * Update registers and commit.
     */
    if (enmEffOpSize == IEMMODE_64BIT)
    {
        Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rdx));
        pVCpu->cpum.GstCtx.rip         = pVCpu->cpum.GstCtx.rdx;
        pVCpu->cpum.GstCtx.rsp         = pVCpu->cpum.GstCtx.rcx;
        pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                       | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
        pVCpu->cpum.GstCtx.cs.Sel      = (uNewCs | 3) + 32;
        pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
        pVCpu->cpum.GstCtx.ss.Sel      = (uNewCs | 3) + 40;
        pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
    }
    else
    {
        Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
        pVCpu->cpum.GstCtx.rip         = pVCpu->cpum.GstCtx.edx;
        pVCpu->cpum.GstCtx.rsp         = pVCpu->cpum.GstCtx.ecx;
        pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                       | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
        pVCpu->cpum.GstCtx.cs.Sel      = (uNewCs | 3) + 16;
        pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
        pVCpu->cpum.GstCtx.ss.Sel      = (uNewCs | 3) + 24;
        pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
    }
    pVCpu->cpum.GstCtx.cs.u64Base  = 0;
    pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
    pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;

    pVCpu->cpum.GstCtx.ss.u64Base  = 0;
    pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
    pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                   | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
    pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;

    pVCpu->iem.s.uCpl = 3;

    /* Flush the prefetch buffer. */
```

(The function then falls through to the pre-existing prefetch-buffer flush and return, which are unchanged context.)
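A small worked example may help make the selector arithmetic above concrete. The sketch below is not VBox code: the mask constant and helper names are assumptions (the real X86_SEL_MASK_OFF_RPL lives in iprt/x86.h), and IsCanonical mirrors what the IEM_IS_CANONICAL test verifies for 48-bit virtual addresses. SYSENTER derives a flat ring-0 CS from IA32_SYSENTER_CS with SS in the next GDT slot (CS + 8); SYSEXIT returns to ring 3 at CS + 16/SS + 24 for a 32-bit exit and CS + 32/SS + 40 for a 64-bit one, with the RPL bits forced to 3.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed: mask clearing the selector's RPL bits, analogous to VBox's
   X86_SEL_MASK_OFF_RPL. */
#define SEL_MASK_OFF_RPL  UINT16_C(0xfffc)

/* 48-bit canonical-address check, as IEM_IS_CANONICAL performs it in
   spirit: bits 63:47 must all equal bit 47. */
static bool IsCanonical(uint64_t uAddr)
{
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}

int main(void)
{
    uint16_t uSysenterCs = 0x0010; /* hypothetical IA32_SYSENTER_CS value */

    /* SYSENTER: flat ring-0 CS, SS sits in the next descriptor slot. */
    printf("sysenter:  CS=%#06x SS=%#06x\n",
           uSysenterCs & SEL_MASK_OFF_RPL,
           (uSysenterCs & SEL_MASK_OFF_RPL) + 8);

    /* SYSEXIT, 32-bit: ring-3 CS/SS at +16/+24, RPL forced to 3. */
    printf("sysexit32: CS=%#06x SS=%#06x\n",
           (uSysenterCs | 3) + 16, (uSysenterCs | 3) + 24);

    /* SYSEXIT, 64-bit: ring-3 CS/SS at +32/+40. */
    printf("sysexit64: CS=%#06x SS=%#06x\n",
           (uSysenterCs | 3) + 32, (uSysenterCs | 3) + 40);

    /* The canonical safeguard from iemCImpl_sysenter, on a sample value. */
    printf("0xfffff80000001000 canonical: %d\n",
           IsCanonical(UINT64_C(0xfffff80000001000)));
    return 0;
}
```

With IA32_SYSENTER_CS = 0x0010 this prints CS=0x0010/SS=0x0018 for sysenter and CS=0x0023/SS=0x002b for a 32-bit sysexit, which is why the single MSR value is enough to describe a whole block of consecutive GDT descriptors.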
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
(r84477 → r86183) The two-byte opcode table entries for 0x0f 0x34 and 0x0f 0x35 are switched from stubs to real decoders that defer to the new C implementations:

```diff
 /** Opcode 0x0f 0x34. */
-FNIEMOP_STUB(iemOp_sysenter);
+FNIEMOP_DEF(iemOp_sysenter)
+{
+    IEMOP_MNEMONIC0(FIXED, SYSENTER, sysenter, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0);
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysenter);
+}
+
 /** Opcode 0x0f 0x35. */
-FNIEMOP_STUB(iemOp_sysexit);
+FNIEMOP_DEF(iemOp_sysexit)
+{
+    IEMOP_MNEMONIC0(FIXED, SYSEXIT, sysexit, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0);
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_sysexit, pVCpu->iem.s.enmEffOpSize);
+}
+
 /** Opcode 0x0f 0x37. */
 FNIEMOP_STUB(iemOp_getsec);
```
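For readers unfamiliar with this file, its overall shape is a table of per-opcode handler functions indexed by the byte that follows the 0x0f escape; the changeset effectively rewires two entries from "raise #UD" stubs to real handlers. The sketch below is a toy illustration of that dispatch pattern, not the IEM decoder itself, and every name in it is made up:

```c
#include <stdint.h>
#include <stdio.h>

typedef int (*PFNOPHANDLER)(void *pVCpu);

static int opStub(void *pVCpu)     { (void)pVCpu; printf("#UD: unimplemented opcode\n"); return -1; }
static int opSysenter(void *pVCpu) { (void)pVCpu; printf("dispatch -> iemCImpl_sysenter\n"); return 0; }
static int opSysexit(void *pVCpu)  { (void)pVCpu; printf("dispatch -> iemCImpl_sysexit\n");  return 0; }

int main(void)
{
    /* 256 handlers, one per second opcode byte; all start out as stubs. */
    PFNOPHANDLER apfnTwoByte[256];
    for (unsigned i = 0; i < 256; i++)
        apfnTwoByte[i] = opStub;

    /* What this changeset effectively does: wire up the two entries. */
    apfnTwoByte[0x34] = opSysenter;   /* 0x0f 0x34 = SYSENTER */
    apfnTwoByte[0x35] = opSysexit;    /* 0x0f 0x35 = SYSEXIT  */

    uint8_t bOpcode = 0x34;           /* byte following the 0x0f escape */
    return apfnTwoByte[bOpcode](NULL);
}
```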