VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h @ 47319

Last change on this file since 47319 was 47319, checked in by vboxsync, 11 years ago:

IEM: More 64-bit fixes.

1/* $Id: IEMAllCImpl.cpp.h 47319 2013-07-22 16:53:15Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 X86EFLAGS Efl;
38 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
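 /* I/O is unrestricted in protected mode only when CPL <= IOPL; otherwise
    (and always in V86 mode) the TSS I/O permission bitmap decides, which is
    the part that is still unimplemented below. */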
39 if ( (pCtx->cr0 & X86_CR0_PE)
40 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
41 || Efl.Bits.u1VM) )
42 {
43 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
44 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
45 }
46 return VINF_SUCCESS;
47}
48
49
50#if 0
51/**
52 * Calculates the parity bit.
53 *
54 * @returns true if the bit is set, false if not.
55 * @param u8Result The least significant byte of the result.
56 */
57static bool iemHlpCalcParityFlag(uint8_t u8Result)
58{
59 /*
60 * Parity is set if the number of bits in the least significant byte of
61 * the result is even.
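 * For example, 0x03 (two bits set) gives PF=1, while 0x07 (three bits set)
 * gives PF=0.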
62 */
63 uint8_t cBits;
64 cBits = u8Result & 1; /* 0 */
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1;
71 u8Result >>= 1;
72 cBits += u8Result & 1; /* 4 */
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 u8Result >>= 1;
78 cBits += u8Result & 1;
79 return !(cBits & 1);
80}
81#endif /* not used */
82
83
84/**
85 * Updates the specified flags according to an 8-bit result.
86 *
87 * @param pIemCpu The IEM state of the calling EMT.
88 * @param u8Result The result to set the flags according to.
89 * @param fToUpdate The flags to update.
90 * @param fUndefined The flags that are specified as undefined.
91 */
92static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
93{
94 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
95
96 uint32_t fEFlags = pCtx->eflags.u;
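 /* TEST of the result against itself recomputes SF/ZF/PF (and clears OF/CF)
    without touching the value; only the requested bits, including those
    declared undefined, are then merged back from the computed flags. */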
97 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
98 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
99 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
100}
101
102
103/**
104 * Loads a NULL data selector into a selector register, both the hidden and
105 * visible parts, in protected mode.
106 *
107 * @param pSReg Pointer to the segment register.
108 * @param uRpl The RPL.
109 */
110static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
111{
112 /** @todo Testcase: write a testcase checking what happens when loading a NULL
113 * data selector in protected mode. */
114 pSReg->Sel = uRpl;
115 pSReg->ValidSel = uRpl;
116 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
117 pSReg->u64Base = 0;
118 pSReg->u32Limit = 0;
119 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
120}
121
122
123/**
124 * Helper used by iret and retf when returning to an outer privilege level.
125 *
126 * @param uCpl The new CPL.
127 * @param pSReg Pointer to the segment register.
128 */
129static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
130{
131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
132 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
133 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
134#else
135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
136#endif
137
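 /* When returning to an outer privilege level, data and non-conforming code
    selectors whose DPL is below the new CPL are forced to NULL so the outer
    ring cannot keep using them. */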
138 if ( uCpl > pSReg->Attr.n.u2Dpl
139 && pSReg->Attr.n.u1DescType /* code or data, not system */
140 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
141 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
142 iemHlpLoadNullDataSelectorProt(pSReg, 0);
143}
144
145
146/**
147 * Indicates that we have modified the FPU state.
148 *
149 * @param pIemCpu The IEM state of the calling EMT.
150 */
151DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
152{
153 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
154}
155
156/** @} */
157
158/** @name C Implementations
159 * @{
160 */
161
162/**
163 * Implements a 16-bit popa.
164 */
165IEM_CIMPL_DEF_0(iemCImpl_popa_16)
166{
167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
168 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
169 RTGCPTR GCPtrLast = GCPtrStart + 15;
170 VBOXSTRICTRC rcStrict;
171
172 /*
173 * The docs are a bit hard to comprehend here, but it looks like we wrap
174 * around in real mode as long as none of the individual "popa" crosses the
175 * end of the stack segment. In protected mode we check the whole access
176 * in one go. For efficiency, only do the word-by-word thing if we're in
177 * danger of wrapping around.
178 */
179 /** @todo do popa boundary / wrap-around checks. */
180 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
181 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
182 {
183 /* word-by-word */
184 RTUINT64U TmpRsp;
185 TmpRsp.u = pCtx->rsp;
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
194 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
195 }
196 if (rcStrict == VINF_SUCCESS)
197 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
198 if (rcStrict == VINF_SUCCESS)
199 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
200 if (rcStrict == VINF_SUCCESS)
201 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 pCtx->rsp = TmpRsp.u;
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 else
209 {
210 uint16_t const *pa16Mem = NULL;
211 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
212 if (rcStrict == VINF_SUCCESS)
213 {
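 /* PUSHA stores AX..DI from the highest address downwards and
    X86_GREG_xAX..X86_GREG_xDI are 0..7, so word (7 - X86_GREG_xXX) of the
    frame at GCPtrStart holds register XX. */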
214 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
215 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
216 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
217 /* skip sp */
218 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
219 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
220 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
221 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
222 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
223 if (rcStrict == VINF_SUCCESS)
224 {
225 iemRegAddToRsp(pCtx, 16);
226 iemRegAddToRip(pIemCpu, cbInstr);
227 }
228 }
229 }
230 return rcStrict;
231}
232
233
234/**
235 * Implements a 32-bit popa.
236 */
237IEM_CIMPL_DEF_0(iemCImpl_popa_32)
238{
239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
240 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
241 RTGCPTR GCPtrLast = GCPtrStart + 31;
242 VBOXSTRICTRC rcStrict;
243
244 /*
245 * The docs are a bit hard to comprehend here, but it looks like we wrap
246 * around in real mode as long as none of the individual "popa" crosses the
247 * end of the stack segment. In protected mode we check the whole access
248 * in one go. For efficiency, only do the word-by-word thing if we're in
249 * danger of wrapping around.
250 */
251 /** @todo do popa boundary / wrap-around checks. */
252 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
253 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
254 {
255 /* dword by dword */
256 RTUINT64U TmpRsp;
257 TmpRsp.u = pCtx->rsp;
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
263 if (rcStrict == VINF_SUCCESS)
264 {
265 iemRegAddToRspEx(&TmpRsp, 4, pCtx); /* skip the saved esp (4 bytes) */
266 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
267 }
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
272 if (rcStrict == VINF_SUCCESS)
273 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
274 if (rcStrict == VINF_SUCCESS)
275 {
276#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
277 pCtx->rdi &= UINT32_MAX;
278 pCtx->rsi &= UINT32_MAX;
279 pCtx->rbp &= UINT32_MAX;
280 pCtx->rbx &= UINT32_MAX;
281 pCtx->rdx &= UINT32_MAX;
282 pCtx->rcx &= UINT32_MAX;
283 pCtx->rax &= UINT32_MAX;
284#endif
285 pCtx->rsp = TmpRsp.u;
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 else
290 {
291 uint32_t const *pa32Mem;
292 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
293 if (rcStrict == VINF_SUCCESS)
294 {
295 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
296 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
297 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
298 /* skip esp */
299 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
300 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
301 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
302 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
303 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
304 if (rcStrict == VINF_SUCCESS)
305 {
306 iemRegAddToRsp(pCtx, 32);
307 iemRegAddToRip(pIemCpu, cbInstr);
308 }
309 }
310 }
311 return rcStrict;
312}
313
314
315/**
316 * Implements a 16-bit pusha.
317 */
318IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
319{
320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
321 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
322 RTGCPTR GCPtrBottom = GCPtrTop - 15;
323 VBOXSTRICTRC rcStrict;
324
325 /*
326 * The docs are a bit hard to comprehend here, but it looks like we wrap
327 * around in real mode as long as none of the individual pushes crosses the
328 * end of the stack segment. In protected mode we check the whole access
329 * in one go. For efficiency, only do the word-by-word thing if we're in
330 * danger of wrapping around.
331 */
332 /** @todo do pusha boundary / wrap-around checks. */
333 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
334 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
335 {
336 /* word-by-word */
337 RTUINT64U TmpRsp;
338 TmpRsp.u = pCtx->rsp;
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
344 if (rcStrict == VINF_SUCCESS)
345 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
346 if (rcStrict == VINF_SUCCESS)
347 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 {
356 pCtx->rsp = TmpRsp.u;
357 iemRegAddToRip(pIemCpu, cbInstr);
358 }
359 }
360 else
361 {
362 GCPtrBottom--;
363 uint16_t *pa16Mem = NULL;
364 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
365 if (rcStrict == VINF_SUCCESS)
366 {
367 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
368 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
369 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
370 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
371 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
372 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
373 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
374 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
375 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
376 if (rcStrict == VINF_SUCCESS)
377 {
378 iemRegSubFromRsp(pCtx, 16);
379 iemRegAddToRip(pIemCpu, cbInstr);
380 }
381 }
382 }
383 return rcStrict;
384}
385
386
387/**
388 * Implements a 32-bit pusha.
389 */
390IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
391{
392 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
393 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
394 RTGCPTR GCPtrBottom = GCPtrTop - 31;
395 VBOXSTRICTRC rcStrict;
396
397 /*
398 * The docs are a bit hard to comprehend here, but it looks like we wrap
399 * around in real mode as long as none of the individual "pusha" crosses the
400 * end of the stack segment. In protected mode we check the whole access
401 * in one go. For efficiency, only do the word-by-word thing if we're in
402 * danger of wrapping around.
403 */
404 /** @todo do pusha boundary / wrap-around checks. */
405 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
406 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
407 {
408 /* dword by dword */
409 RTUINT64U TmpRsp;
410 TmpRsp.u = pCtx->rsp;
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 {
428 pCtx->rsp = TmpRsp.u;
429 iemRegAddToRip(pIemCpu, cbInstr);
430 }
431 }
432 else
433 {
434 GCPtrBottom--;
435 uint32_t *pa32Mem;
436 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
437 if (rcStrict == VINF_SUCCESS)
438 {
439 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
440 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
441 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
442 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
443 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
444 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
445 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
446 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
447 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 iemRegSubFromRsp(pCtx, 32);
451 iemRegAddToRip(pIemCpu, cbInstr);
452 }
453 }
454 }
455 return rcStrict;
456}
457
458
459/**
460 * Implements pushf.
461 *
462 *
463 * @param enmEffOpSize The effective operand size.
464 */
465IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
466{
467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
468
469 /*
470 * If we're in V8086 mode some care is required (which is why we're
471 * doing this in a C implementation).
472 */
473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
474 if ( (fEfl & X86_EFL_VM)
475 && X86_EFL_GET_IOPL(fEfl) != 3 )
476 {
477 Assert(pCtx->cr0 & X86_CR0_PE);
478 if ( enmEffOpSize != IEMMODE_16BIT
479 || !(pCtx->cr4 & X86_CR4_VME))
480 return iemRaiseGeneralProtectionFault0(pIemCpu);
481 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
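 /* IF is bit 9 and VIF is bit 19 of EFLAGS, so this shift copies the
    virtual interrupt flag down into the IF position of the 16-bit image
    pushed below. */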
482 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
483 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
484 }
485
486 /*
487 * Ok, clear RF and VM and push the flags.
488 */
489 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
490
491 VBOXSTRICTRC rcStrict;
492 switch (enmEffOpSize)
493 {
494 case IEMMODE_16BIT:
495 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
496 break;
497 case IEMMODE_32BIT:
498 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
499 break;
500 case IEMMODE_64BIT:
501 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
502 break;
503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
504 }
505 if (rcStrict != VINF_SUCCESS)
506 return rcStrict;
507
508 iemRegAddToRip(pIemCpu, cbInstr);
509 return VINF_SUCCESS;
510}
511
512
513/**
514 * Implements popf.
515 *
516 * @param enmEffOpSize The effective operand size.
517 */
518IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
519{
520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
521 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
522 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
523 VBOXSTRICTRC rcStrict;
524 uint32_t fEflNew;
525
526 /*
527 * V8086 is special as usual.
528 */
529 if (fEflOld & X86_EFL_VM)
530 {
531 /*
532 * Almost anything goes if IOPL is 3.
533 */
534 if (X86_EFL_GET_IOPL(fEflOld) == 3)
535 {
536 switch (enmEffOpSize)
537 {
538 case IEMMODE_16BIT:
539 {
540 uint16_t u16Value;
541 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
542 if (rcStrict != VINF_SUCCESS)
543 return rcStrict;
544 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
545 break;
546 }
547 case IEMMODE_32BIT:
548 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
549 if (rcStrict != VINF_SUCCESS)
550 return rcStrict;
551 break;
552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
553 }
554
555 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
556 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
557 }
558 /*
559 * Interrupt flag virtualization with CR4.VME=1.
560 */
561 else if ( enmEffOpSize == IEMMODE_16BIT
562 && (pCtx->cr4 & X86_CR4_VME) )
563 {
564 uint16_t u16Value;
565 RTUINT64U TmpRsp;
566 TmpRsp.u = pCtx->rsp;
567 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
568 if (rcStrict != VINF_SUCCESS)
569 return rcStrict;
570
571 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
572 * or before? */
573 if ( ( (u16Value & X86_EFL_IF)
574 && (fEflOld & X86_EFL_VIP))
575 || (u16Value & X86_EFL_TF) )
576 return iemRaiseGeneralProtectionFault0(pIemCpu);
577
578 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
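 /* With CR4.VME and IOPL < 3 the popped IF value is meant to land in VIF
    rather than the real IF, hence the shift from bit 9 up to bit 19; the
    real IF is taken from the old flags by the masking below. */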
579 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
580 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
581 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
582
583 pCtx->rsp = TmpRsp.u;
584 }
585 else
586 return iemRaiseGeneralProtectionFault0(pIemCpu);
587
588 }
589 /*
590 * Not in V8086 mode.
591 */
592 else
593 {
594 /* Pop the flags. */
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 {
599 uint16_t u16Value;
600 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
601 if (rcStrict != VINF_SUCCESS)
602 return rcStrict;
603 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
604 break;
605 }
606 case IEMMODE_32BIT:
607 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
608 if (rcStrict != VINF_SUCCESS)
609 return rcStrict;
610 break;
611 case IEMMODE_64BIT:
612 {
613 uint64_t u64Value;
614 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
615 if (rcStrict != VINF_SUCCESS)
616 return rcStrict;
617 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
618 break;
619 }
620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
621 }
622
623 /* Merge them with the current flags. */
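 /* Privilege rules: CPL 0 (or an unchanged IOPL/IF pair) may update
    everything POPF can touch; CPL <= IOPL may change IF but not IOPL;
    any other CPL changes neither. */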
624 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
625 || pIemCpu->uCpl == 0)
626 {
627 fEflNew &= X86_EFL_POPF_BITS;
628 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
629 }
630 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
631 {
632 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
633 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
634 }
635 else
636 {
637 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
638 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
639 }
640 }
641
642 /*
643 * Commit the flags.
644 */
645 Assert(fEflNew & RT_BIT_32(1));
646 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
647 iemRegAddToRip(pIemCpu, cbInstr);
648
649 return VINF_SUCCESS;
650}
651
652
653/**
654 * Implements a 16-bit indirect call.
655 *
656 * @param uNewPC The new program counter (RIP) value (loaded from the
657 * operand).
658 * @param enmEffOpSize The effective operand size.
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673
674}
675
676
677/**
678 * Implements a 16-bit relative call.
679 *
680 * @param offDisp The displacement offset.
681 */
682IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
683{
684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
685 uint16_t uOldPC = pCtx->ip + cbInstr;
686 uint16_t uNewPC = uOldPC + offDisp;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696}
697
698
699/**
700 * Implements a 32-bit indirect call.
701 *
702 * @param uNewPC The new program counter (RIP) value (loaded from the
703 * operand).
704 * @param enmEffOpSize The effective operand size.
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719
720}
721
722
723/**
724 * Implements a 32-bit relative call.
725 *
726 * @param offDisp The displacement offset.
727 */
728IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 uint32_t uOldPC = pCtx->eip + cbInstr;
732 uint32_t uNewPC = uOldPC + offDisp;
733 if (uNewPC > pCtx->cs.u32Limit)
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Implements a 64-bit indirect call.
747 *
748 * @param uNewPC The new program counter (RIP) value (loaded from the
749 * operand).
750 * @param enmEffOpSize The effective operand size.
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseGeneralProtectionFault0(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765
766}
767
768
769/**
770 * Implements a 64-bit relative call.
771 *
772 * @param offDisp The displacement offset.
773 */
774IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
775{
776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
777 uint64_t uOldPC = pCtx->rip + cbInstr;
778 uint64_t uNewPC = uOldPC + offDisp;
779 if (!IEM_IS_CANONICAL(uNewPC))
780 return iemRaiseNotCanonical(pIemCpu);
781
782 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
783 if (rcStrict != VINF_SUCCESS)
784 return rcStrict;
785
786 pCtx->rip = uNewPC;
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Implements far jumps and calls thru task segments (TSS).
793 *
794 * @param uSel The selector.
795 * @param enmBranch The kind of branching we're performing.
796 * @param enmEffOpSize The effective operand size.
797 * @param pDesc The descriptor corresponding to @a uSel. The type is
798 * task segment (TSS).
799 */
800IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
801{
802 /* Call various functions to do the work. */
803 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
804}
805
806
807/**
808 * Implements far jumps and calls thru task gates.
809 *
810 * @param uSel The selector.
811 * @param enmBranch The kind of branching we're performing.
812 * @param enmEffOpSize The effective operand size.
813 * @param pDesc The descriptor corresponding to @a uSel. The type is
814 * task gate.
815 */
816IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
817{
818 /* Call various functions to do the work. */
819 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
820}
821
822
823/**
824 * Implements far jumps and calls thru call gates.
825 *
826 * @param uSel The selector.
827 * @param enmBranch The kind of branching we're performing.
828 * @param enmEffOpSize The effective operand size.
829 * @param pDesc The descriptor corresponding to @a uSel. The type is
830 * call gate.
831 */
832IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
833{
834 /* Call various functions to do the work. */
835 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
836}
837
838
839/**
840 * Implements far jumps and calls thru system selectors.
841 *
842 * @param uSel The selector.
843 * @param enmBranch The kind of branching we're performing.
844 * @param enmEffOpSize The effective operand size.
845 * @param pDesc The descriptor corresponding to @a uSel.
846 */
847IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
848{
849 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
850 Assert((uSel & X86_SEL_MASK_OFF_RPL));
851
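 /* In long mode only 64-bit call gates are valid far-branch targets here;
    the legacy-mode switch further down also dispatches task gates and
    available TSSes, while busy TSSes and the remaining system types #GP
    with the selector as error code. */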
852 if (IEM_IS_LONG_MODE(pIemCpu))
853 switch (pDesc->Legacy.Gen.u4Type)
854 {
855 case AMD64_SEL_TYPE_SYS_CALL_GATE:
856 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
857
858 default:
859 case AMD64_SEL_TYPE_SYS_LDT:
860 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
861 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
862 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
863 case AMD64_SEL_TYPE_SYS_INT_GATE:
864 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 }
868
869 switch (pDesc->Legacy.Gen.u4Type)
870 {
871 case X86_SEL_TYPE_SYS_286_CALL_GATE:
872 case X86_SEL_TYPE_SYS_386_CALL_GATE:
873 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
874
875 case X86_SEL_TYPE_SYS_TASK_GATE:
876 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
877
878 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
879 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
880 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
881
882 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
883 Log(("branch %04x -> busy 286 TSS\n", uSel));
884 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
885
886 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
887 Log(("branch %04x -> busy 386 TSS\n", uSel));
888 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
889
890 default:
891 case X86_SEL_TYPE_SYS_LDT:
892 case X86_SEL_TYPE_SYS_286_INT_GATE:
893 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
894 case X86_SEL_TYPE_SYS_386_INT_GATE:
895 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
896 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
897 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
898 }
899}
900
901
902/**
903 * Implements far jumps.
904 *
905 * @param uSel The selector.
906 * @param offSeg The segment offset.
907 * @param enmEffOpSize The effective operand size.
908 */
909IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
910{
911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
912 NOREF(cbInstr);
913 Assert(offSeg <= UINT32_MAX);
914
915 /*
916 * Real mode and V8086 mode are easy. The only snag seems to be that
917 * CS.limit doesn't change and the limit check is done against the current
918 * limit.
919 */
920 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
921 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
922 {
923 if (offSeg > pCtx->cs.u32Limit)
924 return iemRaiseGeneralProtectionFault0(pIemCpu);
925
926 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
927 pCtx->rip = offSeg;
928 else
929 pCtx->rip = offSeg & UINT16_MAX;
930 pCtx->cs.Sel = uSel;
931 pCtx->cs.ValidSel = uSel;
932 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
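 /* Real/V86 mode: the CS base is simply the selector shifted left by four;
    the cached limit and attributes are left untouched (see the note above
    about CS.limit). */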
933 pCtx->cs.u64Base = (uint32_t)uSel << 4;
934 return VINF_SUCCESS;
935 }
936
937 /*
938 * Protected mode. Need to parse the specified descriptor...
939 */
940 if (!(uSel & X86_SEL_MASK_OFF_RPL))
941 {
942 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 }
945
946 /* Fetch the descriptor. */
947 IEMSELDESC Desc;
948 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
949 if (rcStrict != VINF_SUCCESS)
950 return rcStrict;
951
952 /* Is it there? */
953 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
954 {
955 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
956 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
957 }
958
959 /*
960 * Deal with it according to its type. We do the standard code selectors
961 * here and dispatch the system selectors to worker functions.
962 */
963 if (!Desc.Legacy.Gen.u1DescType)
964 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
965
966 /* Only code segments. */
967 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
968 {
969 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972
973 /* L vs D. */
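 /* A code-segment descriptor with both the L and D/B bits set is reserved
    in IA-32e mode, so a far jump to one must fault. */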
974 if ( Desc.Legacy.Gen.u1Long
975 && Desc.Legacy.Gen.u1DefBig
976 && IEM_IS_LONG_MODE(pIemCpu))
977 {
978 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981
982 /* DPL/RPL/CPL check, where conforming segments make a difference. */
983 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
984 {
985 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
986 {
987 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
988 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
989 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
990 }
991 }
992 else
993 {
994 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
995 {
996 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1000 {
1001 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1002 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1003 }
1004 }
1005
1006 /* Chop the high bits if 16-bit (Intel says so). */
1007 if (enmEffOpSize == IEMMODE_16BIT)
1008 offSeg &= UINT16_MAX;
1009
1010 /* Limit check. (Should alternatively check for non-canonical addresses
1011 here, but that is ruled out by offSeg being 32-bit, right?) */
1012 uint64_t u64Base;
1013 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1015 u64Base = 0;
1016 else
1017 {
1018 if (offSeg > cbLimit)
1019 {
1020 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1022 }
1023 u64Base = X86DESC_BASE(&Desc.Legacy);
1024 }
1025
1026 /*
1027 * Ok, everything checked out fine. Now set the accessed bit before
1028 * committing the result into CS, CSHID and RIP.
1029 */
1030 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1031 {
1032 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1033 if (rcStrict != VINF_SUCCESS)
1034 return rcStrict;
1035 /** @todo check what VT-x and AMD-V does. */
1036 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1037 }
1038
1039 /* commit */
1040 pCtx->rip = offSeg;
1041 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1042 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1043 pCtx->cs.ValidSel = pCtx->cs.Sel;
1044 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1045 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1046 pCtx->cs.u32Limit = cbLimit;
1047 pCtx->cs.u64Base = u64Base;
1048 /** @todo check if the hidden bits are loaded correctly for 64-bit
1049 * mode. */
1050 return VINF_SUCCESS;
1051}
1052
1053
1054/**
1055 * Implements far calls.
1056 *
1057 * This is very similar to iemCImpl_FarJmp.
1058 *
1059 * @param uSel The selector.
1060 * @param offSeg The segment offset.
1061 * @param enmEffOpSize The operand size (in case we need it).
1062 */
1063IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1064{
1065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1066 VBOXSTRICTRC rcStrict;
1067 uint64_t uNewRsp;
1068 RTPTRUNION uPtrRet;
1069
1070 /*
1071 * Real mode and V8086 mode are easy. The only snag seems to be that
1072 * CS.limit doesn't change and the limit check is done against the current
1073 * limit.
1074 */
1075 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1076 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1077 {
1078 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1079
1080 /* Check stack first - may #SS(0). */
1081 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1082 &uPtrRet.pv, &uNewRsp);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 /* Check the target address range. */
1087 if (offSeg > UINT32_MAX)
1088 return iemRaiseGeneralProtectionFault0(pIemCpu);
1089
1090 /* Everything is fine, push the return address. */
1091 if (enmEffOpSize == IEMMODE_16BIT)
1092 {
1093 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1094 uPtrRet.pu16[1] = pCtx->cs.Sel;
1095 }
1096 else
1097 {
1098 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1099 uPtrRet.pu16[3] = pCtx->cs.Sel;
1100 }
1101 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1102 if (rcStrict != VINF_SUCCESS)
1103 return rcStrict;
1104
1105 /* Branch. */
1106 pCtx->rip = offSeg;
1107 pCtx->cs.Sel = uSel;
1108 pCtx->cs.ValidSel = uSel;
1109 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1110 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1111 return VINF_SUCCESS;
1112 }
1113
1114 /*
1115 * Protected mode. Need to parse the specified descriptor...
1116 */
1117 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1118 {
1119 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1120 return iemRaiseGeneralProtectionFault0(pIemCpu);
1121 }
1122
1123 /* Fetch the descriptor. */
1124 IEMSELDESC Desc;
1125 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1126 if (rcStrict != VINF_SUCCESS)
1127 return rcStrict;
1128
1129 /*
1130 * Deal with it according to its type. We do the standard code selectors
1131 * here and dispatch the system selectors to worker functions.
1132 */
1133 if (!Desc.Legacy.Gen.u1DescType)
1134 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1135
1136 /* Only code segments. */
1137 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1138 {
1139 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1141 }
1142
1143 /* L vs D. */
1144 if ( Desc.Legacy.Gen.u1Long
1145 && Desc.Legacy.Gen.u1DefBig
1146 && IEM_IS_LONG_MODE(pIemCpu))
1147 {
1148 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151
1152 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1153 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1154 {
1155 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1156 {
1157 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1158 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1159 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1160 }
1161 }
1162 else
1163 {
1164 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1165 {
1166 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1167 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1168 }
1169 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1170 {
1171 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1172 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1173 }
1174 }
1175
1176 /* Is it there? */
1177 if (!Desc.Legacy.Gen.u1Present)
1178 {
1179 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1180 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1181 }
1182
1183 /* Check stack first - may #SS(0). */
1184 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1185 * 16-bit code cause a two or four byte CS to be pushed? */
1186 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1187 enmEffOpSize == IEMMODE_64BIT ? 8+8
1188 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1189 &uPtrRet.pv, &uNewRsp);
1190 if (rcStrict != VINF_SUCCESS)
1191 return rcStrict;
1192
1193 /* Chop the high bits if 16-bit (Intel says so). */
1194 if (enmEffOpSize == IEMMODE_16BIT)
1195 offSeg &= UINT16_MAX;
1196
1197 /* Limit / canonical check. */
1198 uint64_t u64Base;
1199 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1201 {
1202 if (!IEM_IS_CANONICAL(offSeg))
1203 {
1204 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1205 return iemRaiseNotCanonical(pIemCpu);
1206 }
1207 u64Base = 0;
1208 }
1209 else
1210 {
1211 if (offSeg > cbLimit)
1212 {
1213 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1214 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1215 }
1216 u64Base = X86DESC_BASE(&Desc.Legacy);
1217 }
1218
1219 /*
1220 * Now set the accessed bit before
1221 * writing the return address to the stack and committing the result into
1222 * CS, CSHID and RIP.
1223 */
1224 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1225 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1226 {
1227 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1228 if (rcStrict != VINF_SUCCESS)
1229 return rcStrict;
1230 /** @todo check what VT-x and AMD-V does. */
1231 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1232 }
1233
1234 /* stack */
1235 if (enmEffOpSize == IEMMODE_16BIT)
1236 {
1237 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1238 uPtrRet.pu16[1] = pCtx->cs.Sel;
1239 }
1240 else if (enmEffOpSize == IEMMODE_32BIT)
1241 {
1242 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1243 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1244 }
1245 else
1246 {
1247 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1248 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1249 }
1250 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 /* commit */
1255 pCtx->rip = offSeg;
1256 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1257 pCtx->cs.Sel |= pIemCpu->uCpl;
1258 pCtx->cs.ValidSel = pCtx->cs.Sel;
1259 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1260 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1261 pCtx->cs.u32Limit = cbLimit;
1262 pCtx->cs.u64Base = u64Base;
1263 /** @todo check if the hidden bits are loaded correctly for 64-bit
1264 * mode. */
1265 return VINF_SUCCESS;
1266}
1267
1268
1269/**
1270 * Implements retf.
1271 *
1272 * @param enmEffOpSize The effective operand size.
1273 * @param cbPop The number of bytes of arguments to pop from the
1274 * stack.
1275 */
1276IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1277{
1278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1279 VBOXSTRICTRC rcStrict;
1280 RTCPTRUNION uPtrFrame;
1281 uint64_t uNewRsp;
1282 uint64_t uNewRip;
1283 uint16_t uNewCs;
1284 NOREF(cbInstr);
1285
1286 /*
1287 * Read the stack values first.
1288 */
1289 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1290 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1291 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1292 if (rcStrict != VINF_SUCCESS)
1293 return rcStrict;
1294 if (enmEffOpSize == IEMMODE_16BIT)
1295 {
1296 uNewRip = uPtrFrame.pu16[0];
1297 uNewCs = uPtrFrame.pu16[1];
1298 }
1299 else if (enmEffOpSize == IEMMODE_32BIT)
1300 {
1301 uNewRip = uPtrFrame.pu32[0];
1302 uNewCs = uPtrFrame.pu16[2];
1303 }
1304 else
1305 {
1306 uNewRip = uPtrFrame.pu64[0];
1307 uNewCs = uPtrFrame.pu16[4];
1308 }
1309
1310 /*
1311 * Real mode and V8086 mode are easy.
1312 */
1313 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1315 {
1316 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1317 /** @todo check how this is supposed to work if sp=0xfffe. */
1318
1319 /* Check the limit of the new EIP. */
1320 /** @todo Intel pseudo code only does the limit check for 16-bit
1321 * operands, AMD does not make any distinction. What is right? */
1322 if (uNewRip > pCtx->cs.u32Limit)
1323 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1324
1325 /* commit the operation. */
1326 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329 pCtx->rip = uNewRip;
1330 pCtx->cs.Sel = uNewCs;
1331 pCtx->cs.ValidSel = uNewCs;
1332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1333 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1334 /** @todo do we load attribs and limit as well? */
1335 if (cbPop)
1336 iemRegAddToRsp(pCtx, cbPop);
1337 return VINF_SUCCESS;
1338 }
1339
1340 /*
1341 * Protected mode is complicated, of course.
1342 */
1343 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1344 {
1345 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1346 return iemRaiseGeneralProtectionFault0(pIemCpu);
1347 }
1348
1349 /* Fetch the descriptor. */
1350 IEMSELDESC DescCs;
1351 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1352 if (rcStrict != VINF_SUCCESS)
1353 return rcStrict;
1354
1355 /* Can only return to a code selector. */
1356 if ( !DescCs.Legacy.Gen.u1DescType
1357 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1358 {
1359 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1360 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1361 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1362 }
1363
1364 /* L vs D. */
1365 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1366 && DescCs.Legacy.Gen.u1DefBig
1367 && IEM_IS_LONG_MODE(pIemCpu))
1368 {
1369 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1371 }
1372
1373 /* DPL/RPL/CPL checks. */
1374 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1375 {
1376 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1377 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1378 }
1379
1380 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1381 {
1382 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1383 {
1384 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1385 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1387 }
1388 }
1389 else
1390 {
1391 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1392 {
1393 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1394 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1395 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1396 }
1397 }
1398
1399 /* Is it there? */
1400 if (!DescCs.Legacy.Gen.u1Present)
1401 {
1402 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1403 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1404 }
1405
1406 /*
1407 * Return to outer privilege? (We'll typically have entered via a call gate.)
1408 */
1409 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1410 {
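 /* The inter-privilege call frame on this (inner) stack is, from low to
    high addresses: return IP/RIP, return CS, any copied parameter bytes,
    then the outer RSP and SS that get restored here. */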
1411 /* Read the return pointer, it comes before the parameters. */
1412 RTCPTRUNION uPtrStack;
1413 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1414 if (rcStrict != VINF_SUCCESS)
1415 return rcStrict;
1416 uint16_t uNewOuterSs;
1417 uint64_t uNewOuterRsp;
1418 if (enmEffOpSize == IEMMODE_16BIT)
1419 {
1420 uNewOuterRsp = uPtrFrame.pu16[0];
1421 uNewOuterSs = uPtrFrame.pu16[1];
1422 }
1423 else if (enmEffOpSize == IEMMODE_32BIT)
1424 {
1425 uNewOuterRsp = uPtrFrame.pu32[0];
1426 uNewOuterSs = uPtrFrame.pu16[2];
1427 }
1428 else
1429 {
1430 uNewOuterRsp = uPtrFrame.pu64[0];
1431 uNewOuterSs = uPtrFrame.pu16[4];
1432 }
1433
1434 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1435 and read the selector. */
1436 IEMSELDESC DescSs;
1437 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1438 {
1439 if ( !DescCs.Legacy.Gen.u1Long
1440 || (uNewOuterSs & X86_SEL_RPL) == 3)
1441 {
1442 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1443 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1444 return iemRaiseGeneralProtectionFault0(pIemCpu);
1445 }
1446 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1447 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1448 }
1449 else
1450 {
1451 /* Fetch the descriptor for the new stack segment. */
1452 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1453 if (rcStrict != VINF_SUCCESS)
1454 return rcStrict;
1455 }
1456
1457 /* Check that RPL of stack and code selectors match. */
1458 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1459 {
1460 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* Must be a writable data segment. */
1465 if ( !DescSs.Legacy.Gen.u1DescType
1466 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1467 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1468 {
1469 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1470 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1472 }
1473
1474 /* L vs D. (Not mentioned by intel.) */
1475 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1476 && DescSs.Legacy.Gen.u1DefBig
1477 && IEM_IS_LONG_MODE(pIemCpu))
1478 {
1479 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1480 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1482 }
1483
1484 /* DPL/RPL/CPL checks. */
1485 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1486 {
1487 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1488 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1489 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1490 }
1491
1492 /* Is it there? */
1493 if (!DescSs.Legacy.Gen.u1Present)
1494 {
1495 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1496 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
1497 }
1498
1499 /* Calc SS limit.*/
1500 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1501
1502 /* Is RIP canonical or within CS.limit? */
1503 uint64_t u64Base;
1504 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1505
1506 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1507 {
1508 if (!IEM_IS_CANONICAL(uNewRip))
1509 {
1510 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1511 return iemRaiseNotCanonical(pIemCpu);
1512 }
1513 u64Base = 0;
1514 }
1515 else
1516 {
1517 if (uNewRip > cbLimitCs)
1518 {
1519 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1520 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1521 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1522 }
1523 u64Base = X86DESC_BASE(&DescCs.Legacy);
1524 }
1525
1526 /*
1527 * Now set the accessed bit before
1528 * writing the return address to the stack and committing the result into
1529 * CS, CSHID and RIP.
1530 */
1531 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1532 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1533 {
1534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1535 if (rcStrict != VINF_SUCCESS)
1536 return rcStrict;
1537 /** @todo check what VT-x and AMD-V does. */
1538 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1539 }
1540 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1541 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1542 {
1543 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1544 if (rcStrict != VINF_SUCCESS)
1545 return rcStrict;
1546 /** @todo check what VT-x and AMD-V does. */
1547 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1548 }
1549
1550 /* commit */
1551 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1552 if (rcStrict != VINF_SUCCESS)
1553 return rcStrict;
1554 if (enmEffOpSize == IEMMODE_16BIT)
1555 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1556 else
1557 pCtx->rip = uNewRip;
1558 pCtx->cs.Sel = uNewCs;
1559 pCtx->cs.ValidSel = uNewCs;
1560 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1562 pCtx->cs.u32Limit = cbLimitCs;
1563 pCtx->cs.u64Base = u64Base;
1564 pCtx->rsp = uNewRsp;
1565 pCtx->ss.Sel = uNewOuterSs;
1566 pCtx->ss.ValidSel = uNewOuterSs;
1567 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1568 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1569 pCtx->ss.u32Limit = cbLimitSs;
1570 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1571 pCtx->ss.u64Base = 0;
1572 else
1573 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1574
1575 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1576 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1577 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1578 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1579 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1580
1581 /** @todo check if the hidden bits are loaded correctly for 64-bit
1582 * mode. */
1583
1584 if (cbPop)
1585 iemRegAddToRsp(pCtx, cbPop);
1586
1587 /* Done! */
1588 }
1589 /*
1590 * Return to the same privilege level
1591 */
1592 else
1593 {
1594 /* Limit / canonical check. */
1595 uint64_t u64Base;
1596 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1597
1598 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1599 {
1600 if (!IEM_IS_CANONICAL(uNewRip))
1601 {
1602 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1603 return iemRaiseNotCanonical(pIemCpu);
1604 }
1605 u64Base = 0;
1606 }
1607 else
1608 {
1609 if (uNewRip > cbLimitCs)
1610 {
1611 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1613 }
1614 u64Base = X86DESC_BASE(&DescCs.Legacy);
1615 }
1616
1617 /*
1618 * Now set the accessed bit before
1619 * writing the return address to the stack and committing the result into
1620 * CS, CSHID and RIP.
1621 */
1622 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1623 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1624 {
1625 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1626 if (rcStrict != VINF_SUCCESS)
1627 return rcStrict;
1628 /** @todo check what VT-x and AMD-V does. */
1629 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1630 }
1631
1632 /* commit */
1633 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1634 if (rcStrict != VINF_SUCCESS)
1635 return rcStrict;
1636 if (enmEffOpSize == IEMMODE_16BIT)
1637 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1638 else
1639 pCtx->rip = uNewRip;
1640 pCtx->cs.Sel = uNewCs;
1641 pCtx->cs.ValidSel = uNewCs;
1642 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1643 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1644 pCtx->cs.u32Limit = cbLimitCs;
1645 pCtx->cs.u64Base = u64Base;
1646 /** @todo check if the hidden bits are loaded correctly for 64-bit
1647 * mode. */
1648 if (cbPop)
1649 iemRegAddToRsp(pCtx, cbPop);
1650 }
1651 return VINF_SUCCESS;
1652}
1653
1654
1655/**
1656 * Implements retn.
1657 *
1658 * We're doing this in C because of the \#GP that might be raised if the popped
1659 * program counter is out of bounds.
1660 *
1661 * @param enmEffOpSize The effective operand size.
1662 * @param cbPop The number of bytes of arguments to pop from the
1663 * stack.
1664 */
1665IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1666{
1667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1668 NOREF(cbInstr);
1669
1670 /* Fetch the RSP from the stack. */
1671 VBOXSTRICTRC rcStrict;
1672 RTUINT64U NewRip;
1673 RTUINT64U NewRsp;
1674 NewRsp.u = pCtx->rsp;
1675 switch (enmEffOpSize)
1676 {
1677 case IEMMODE_16BIT:
1678 NewRip.u = 0;
1679 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1680 break;
1681 case IEMMODE_32BIT:
1682 NewRip.u = 0;
1683 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1684 break;
1685 case IEMMODE_64BIT:
1686 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1687 break;
1688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1689 }
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692
1693 /* Check the new RSP before loading it. */
1694 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1695 * of it. The canonical test is performed here and for call. */
1696 if (enmEffOpSize != IEMMODE_64BIT)
1697 {
1698 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1699 {
1700 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1701 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1702 }
1703 }
1704 else
1705 {
1706 if (!IEM_IS_CANONICAL(NewRip.u))
1707 {
1708 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1709 return iemRaiseNotCanonical(pIemCpu);
1710 }
1711 }
1712
1713 /* Commit it. */
1714 pCtx->rip = NewRip.u;
1715 pCtx->rsp = NewRsp.u;
1716 if (cbPop)
1717 iemRegAddToRsp(pCtx, cbPop);
1718
1719 return VINF_SUCCESS;
1720}
1721
1722
1723/**
1724 * Implements enter.
1725 *
1726 * We're doing this in C because the instruction is insane; even in the
1727 * u8NestingLevel=0 case, dealing with the stack is tedious.
1728 *
1729 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate (in bytes).
 * @param cParameters The nesting level; only the low five bits are used.
1730 */
1731IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1732{
1733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1734
1735 /* Push RBP, saving the old value in TmpRbp. */
1736 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1737 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1738 RTUINT64U NewRbp;
1739 VBOXSTRICTRC rcStrict;
1740 if (enmEffOpSize == IEMMODE_64BIT)
1741 {
1742 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1743 NewRbp = NewRsp;
1744 }
1745 else if (pCtx->ss.Attr.n.u1DefBig)
1746 {
1747 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1748 NewRbp = NewRsp;
1749 }
1750 else
1751 {
1752 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1753 NewRbp = TmpRbp;
1754 NewRbp.Words.w0 = NewRsp.Words.w0;
1755 }
1756 if (rcStrict != VINF_SUCCESS)
1757 return rcStrict;
1758
1759 /* Copy the parameters (aka nesting levels by Intel). */
1760 cParameters &= 0x1f;
1761 if (cParameters > 0)
1762 {
1763 switch (enmEffOpSize)
1764 {
1765 case IEMMODE_16BIT:
1766 if (pCtx->ss.Attr.n.u1DefBig)
1767 TmpRbp.DWords.dw0 -= 2;
1768 else
1769 TmpRbp.Words.w0 -= 2;
1770 do
1771 {
1772 uint16_t u16Tmp;
1773 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1774 if (rcStrict != VINF_SUCCESS)
1775 break;
1776 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1777 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1778 break;
1779
1780 case IEMMODE_32BIT:
1781 if (pCtx->ss.Attr.n.u1DefBig)
1782 TmpRbp.DWords.dw0 -= 4;
1783 else
1784 TmpRbp.Words.w0 -= 4;
1785 do
1786 {
1787 uint32_t u32Tmp;
1788 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1789 if (rcStrict != VINF_SUCCESS)
1790 break;
1791 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1792 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1793 break;
1794
1795 case IEMMODE_64BIT:
1796 TmpRbp.u -= 8;
1797 do
1798 {
1799 uint64_t u64Tmp;
1800 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1801 if (rcStrict != VINF_SUCCESS)
1802 break;
1803 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1804 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1805 break;
1806
1807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1808 }
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811
1812 /* Push the new RBP */
1813 if (enmEffOpSize == IEMMODE_64BIT)
1814 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1815 else if (pCtx->ss.Attr.n.u1DefBig)
1816 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1817 else
1818 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1819 if (rcStrict != VINF_SUCCESS)
1820 return rcStrict;
1821
1822 }
1823
1824 /* Recalc RSP. */
1825 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1826
1827 /** @todo Should probe write access at the new RSP according to AMD. */
1828
1829 /* Commit it. */
1830 pCtx->rbp = NewRbp.u;
1831 pCtx->rsp = NewRsp.u;
1832 iemRegAddToRip(pIemCpu, cbInstr);
1833
1834 return VINF_SUCCESS;
1835}
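
/*
 * For orientation, 'enter cbFrame, cParameters' as implemented above boils
 * down to the following (faults and the 16/32/64-bit stack width handling
 * omitted):
 *
 *      push    old rBP
 *      frame = rSP                     // the new frame pointer
 *      if (cParameters != 0)
 *          copy outer frame pointers from [rBP] and push the new frame pointer
 *      rBP  = frame
 *      rSP -= cbFrame                  // allocate the local variable area
 */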
1836
1837
1838
1839/**
1840 * Implements leave.
1841 *
1842 * We're doing this in C because messing with the stack registers is annoying
1843 * since they depend on SS attributes.
1844 *
1845 * @param enmEffOpSize The effective operand size.
1846 */
1847IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1848{
1849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1850
1851 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1852 RTUINT64U NewRsp;
1853 if (pCtx->ss.Attr.n.u1Long || pCtx->ss.Attr.n.u1Unusable)
1854 NewRsp.u = pCtx->rbp;
1855 else if (pCtx->ss.Attr.n.u1DefBig)
1856 NewRsp.u = pCtx->ebp;
1857 else
1858 {
1859 /** @todo Check that LEAVE actually preserve the high EBP bits. */
1860 NewRsp.u = pCtx->rsp;
1861 NewRsp.Words.w0 = pCtx->bp;
1862 }
1863
1864 /* Pop RBP according to the operand size. */
1865 VBOXSTRICTRC rcStrict;
1866 RTUINT64U NewRbp;
1867 switch (enmEffOpSize)
1868 {
1869 case IEMMODE_16BIT:
1870 NewRbp.u = pCtx->rbp;
1871 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1872 break;
1873 case IEMMODE_32BIT:
1874 NewRbp.u = 0;
1875 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1876 break;
1877 case IEMMODE_64BIT:
1878 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1879 break;
1880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1881 }
1882 if (rcStrict != VINF_SUCCESS)
1883 return rcStrict;
1884
1885
1886 /* Commit it. */
1887 pCtx->rbp = NewRbp.u;
1888 pCtx->rsp = NewRsp.u;
1889 iemRegAddToRip(pIemCpu, cbInstr);
1890
1891 return VINF_SUCCESS;
1892}
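
/*
 * In other words, the above is the classic 'mov rSP, rBP; pop rBP' pair, with
 * the SS.B/SS.L attributes deciding how much of RSP/RBP actually takes part.
 */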
1893
1894
1895/**
1896 * Implements int3 and int XX.
1897 *
1898 * @param u8Int The interrupt vector number.
1899 * @param fIsBpInstr Is it the breakpoint instruction.
1900 */
1901IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1902{
1903 Assert(pIemCpu->cXcptRecursions == 0);
1904 return iemRaiseXcptOrInt(pIemCpu,
1905 cbInstr,
1906 u8Int,
1907 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1908 0,
1909 0);
1910}
1911
1912
1913/**
1914 * Implements iret for real mode and V8086 mode.
1915 *
1916 * @param enmEffOpSize The effective operand size.
1917 */
1918IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1919{
1920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1922 X86EFLAGS Efl;
1923 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
1924 NOREF(cbInstr);
1925
1926 /*
1927 * iret throws an exception if VME isn't enabled.
1928 */
1929 if ( pCtx->eflags.Bits.u1VM
1930 && !(pCtx->cr4 & X86_CR4_VME))
1931 return iemRaiseGeneralProtectionFault0(pIemCpu);
1932
1933 /*
1934 * Do the stack bits, but don't commit RSP before everything checks
1935 * out right.
1936 */
1937 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1938 VBOXSTRICTRC rcStrict;
1939 RTCPTRUNION uFrame;
1940 uint16_t uNewCs;
1941 uint32_t uNewEip;
1942 uint32_t uNewFlags;
1943 uint64_t uNewRsp;
1944 if (enmEffOpSize == IEMMODE_32BIT)
1945 {
1946 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1947 if (rcStrict != VINF_SUCCESS)
1948 return rcStrict;
1949 uNewEip = uFrame.pu32[0];
1950 uNewCs = (uint16_t)uFrame.pu32[1];
1951 uNewFlags = uFrame.pu32[2];
1952 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1953 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1954 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1955 | X86_EFL_ID;
1956 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1957 }
1958 else
1959 {
1960 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963 uNewEip = uFrame.pu16[0];
1964 uNewCs = uFrame.pu16[1];
1965 uNewFlags = uFrame.pu16[2];
1966 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1967 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1968 uNewFlags |= Efl.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1969 /** @todo The intel pseudo code does not indicate what happens to
1970 * reserved flags. We just ignore them. */
1971 }
1972 /** @todo Check how this is supposed to work if sp=0xfffe. */
1973
1974 /*
1975 * Check the limit of the new EIP.
1976 */
1977 /** @todo Only the AMD pseudo code checks the limit here; what's
1978 * right? */
1979 if (uNewEip > pCtx->cs.u32Limit)
1980 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1981
1982 /*
1983 * V8086 checks and flag adjustments
1984 */
1985 if (Efl.Bits.u1VM)
1986 {
1987 if (Efl.Bits.u2IOPL == 3)
1988 {
1989 /* Preserve IOPL and clear RF. */
1990 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1991 uNewFlags |= Efl.u & (X86_EFL_IOPL);
1992 }
1993 else if ( enmEffOpSize == IEMMODE_16BIT
1994 && ( !(uNewFlags & X86_EFL_IF)
1995 || !Efl.Bits.u1VIP )
1996 && !(uNewFlags & X86_EFL_TF) )
1997 {
1998 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1999 uNewFlags &= ~X86_EFL_VIF;
2000 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2001 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2002 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2003 }
2004 else
2005 return iemRaiseGeneralProtectionFault0(pIemCpu);
2006 }
2007
2008 /*
2009 * Commit the operation.
2010 */
2011 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2012 if (rcStrict != VINF_SUCCESS)
2013 return rcStrict;
2014 pCtx->rip = uNewEip;
2015 pCtx->cs.Sel = uNewCs;
2016 pCtx->cs.ValidSel = uNewCs;
2017 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2018 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2019 /** @todo do we load attribs and limit as well? */
2020 Assert(uNewFlags & X86_EFL_1);
2021 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2022
2023 return VINF_SUCCESS;
2024}
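
/*
 * For reference, the real/V8086 mode IRET frame popped above looks like this
 * (lowest address first):
 *      16-bit operand size:  IP, CS, FLAGS        (3 x 2 bytes)
 *      32-bit operand size:  EIP, CS, EFLAGS      (3 x 4 bytes, CS in the low word)
 */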
2025
2026
2027/**
2028 * Loads a segment register when entering V8086 mode.
2029 *
2030 * @param pSReg The segment register.
2031 * @param uSeg The segment to load.
2032 */
2033static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2034{
2035 pSReg->Sel = uSeg;
2036 pSReg->ValidSel = uSeg;
2037 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2038 pSReg->u64Base = (uint32_t)uSeg << 4;
2039 pSReg->u32Limit = 0xffff;
2040 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2041 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2042 * IRET'ing to V8086. */
2043}
2044
2045
2046/**
2047 * Implements iret for protected mode returning to V8086 mode.
2048 *
2049 * @param pCtx Pointer to the CPU context.
2050 * @param uNewEip The new EIP.
2051 * @param uNewCs The new CS.
2052 * @param uNewFlags The new EFLAGS.
2053 * @param uNewRsp The RSP after the initial IRET frame.
2054 */
2055IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2056 uint32_t, uNewFlags, uint64_t, uNewRsp)
2057{
2058#if 0
2059 if (!LogIs6Enabled())
2060 {
2061 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
2062 RTLogFlags(NULL, "enabled");
2063 return VERR_IEM_RESTART_INSTRUCTION;
2064 }
2065#endif
2066
2067 /*
2068 * Pop the V8086 specific frame bits off the stack.
2069 */
2070 VBOXSTRICTRC rcStrict;
2071 RTCPTRUNION uFrame;
2072 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2073 if (rcStrict != VINF_SUCCESS)
2074 return rcStrict;
2075 uint32_t uNewEsp = uFrame.pu32[0];
2076 uint16_t uNewSs = uFrame.pu32[1];
2077 uint16_t uNewEs = uFrame.pu32[2];
2078 uint16_t uNewDs = uFrame.pu32[3];
2079 uint16_t uNewFs = uFrame.pu32[4];
2080 uint16_t uNewGs = uFrame.pu32[5];
2081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 /*
2086 * Commit the operation.
2087 */
2088 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2089 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2090 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2091 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2092 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2093 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2094 pCtx->rip = uNewEip;
2095 pCtx->rsp = uNewEsp;
2096 pCtx->rflags.u = uNewFlags;
2097 pIemCpu->uCpl = 3;
2098
2099 return VINF_SUCCESS;
2100}
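
/*
 * The 24 bytes popped above are the V8086 tail of the IRET frame, i.e. what
 * follows EIP, CS and EFLAGS (lowest address first):
 *      ESP, SS, ES, DS, FS, GS         (6 x 4 bytes, selectors in the low words)
 */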
2101
2102
2103/**
2104 * Implements iret for protected mode returning via a nested task.
2105 *
2106 * @param enmEffOpSize The effective operand size.
2107 */
2108IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2109{
2110 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2111}
2112
2113
2114/**
2115 * Implements iret for protected mode.
2116 *
2117 * @param enmEffOpSize The effective operand size.
2118 */
2119IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2120{
2121 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2122 NOREF(cbInstr);
2123
2124 /*
2125 * Nested task return.
2126 */
2127 if (pCtx->eflags.Bits.u1NT)
2128 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2129
2130 /*
2131 * Normal return.
2132 *
2133 * Do the stack bits, but don't commit RSP before everything checks
2134 * out right.
2135 */
2136 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2137 VBOXSTRICTRC rcStrict;
2138 RTCPTRUNION uFrame;
2139 uint16_t uNewCs;
2140 uint32_t uNewEip;
2141 uint32_t uNewFlags;
2142 uint64_t uNewRsp;
2143 if (enmEffOpSize == IEMMODE_32BIT)
2144 {
2145 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 uNewEip = uFrame.pu32[0];
2149 uNewCs = (uint16_t)uFrame.pu32[1];
2150 uNewFlags = uFrame.pu32[2];
2151 }
2152 else
2153 {
2154 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157 uNewEip = uFrame.pu16[0];
2158 uNewCs = uFrame.pu16[1];
2159 uNewFlags = uFrame.pu16[2];
2160 }
2161 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2162 if (rcStrict != VINF_SUCCESS)
2163 return rcStrict;
2164
2165 /*
2166 * We're hopefully not returning to V8086 mode...
2167 */
2168 if ( (uNewFlags & X86_EFL_VM)
2169 && pIemCpu->uCpl == 0)
2170 {
2171 Assert(enmEffOpSize == IEMMODE_32BIT);
2172 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
2173 }
2174
2175 /*
2176 * Protected mode.
2177 */
2178 /* Read the CS descriptor. */
2179 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2180 {
2181 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2182 return iemRaiseGeneralProtectionFault0(pIemCpu);
2183 }
2184
2185 IEMSELDESC DescCS;
2186 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2187 if (rcStrict != VINF_SUCCESS)
2188 {
2189 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2190 return rcStrict;
2191 }
2192
2193 /* Must be a code descriptor. */
2194 if (!DescCS.Legacy.Gen.u1DescType)
2195 {
2196 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2198 }
2199 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2200 {
2201 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2203 }
2204
2205 /* Privilege checks. */
2206 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2207 {
2208 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2210 }
2211 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2212 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2213 {
2214 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2215 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2216 }
2217
2218 /* Present? */
2219 if (!DescCS.Legacy.Gen.u1Present)
2220 {
2221 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2222 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2223 }
2224
2225 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2226
2227 /*
2228 * Return to outer level?
2229 */
2230 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2231 {
2232 uint16_t uNewSS;
2233 uint32_t uNewESP;
2234 if (enmEffOpSize == IEMMODE_32BIT)
2235 {
2236 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2237 if (rcStrict != VINF_SUCCESS)
2238 return rcStrict;
2239 uNewESP = uFrame.pu32[0];
2240 uNewSS = (uint16_t)uFrame.pu32[1];
2241 }
2242 else
2243 {
2244 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2245 if (rcStrict != VINF_SUCCESS)
2246 return rcStrict;
2247 uNewESP = uFrame.pu16[0];
2248 uNewSS = uFrame.pu16[1];
2249 }
2250 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Read the SS descriptor. */
2255 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2256 {
2257 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2258 return iemRaiseGeneralProtectionFault0(pIemCpu);
2259 }
2260
2261 IEMSELDESC DescSS;
2262 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2263 if (rcStrict != VINF_SUCCESS)
2264 {
2265 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2266 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2267 return rcStrict;
2268 }
2269
2270 /* Privilege checks. */
2271 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2272 {
2273 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2274 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2275 }
2276 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2277 {
2278 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2279 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2280 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2281 }
2282
2283 /* Must be a writeable data segment descriptor. */
2284 if (!DescSS.Legacy.Gen.u1DescType)
2285 {
2286 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2287 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2288 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2289 }
2290 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2291 {
2292 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2293 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2294 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2295 }
2296
2297 /* Present? */
2298 if (!DescSS.Legacy.Gen.u1Present)
2299 {
2300 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2301 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2302 }
2303
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2305
2306 /* Check EIP. */
2307 if (uNewEip > cbLimitCS)
2308 {
2309 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2310 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2311 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2312 }
2313
2314 /*
2315 * Commit the changes, marking CS and SS accessed first since
2316 * that may fail.
2317 */
2318 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2319 {
2320 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2321 if (rcStrict != VINF_SUCCESS)
2322 return rcStrict;
2323 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2324 }
2325 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2326 {
2327 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2328 if (rcStrict != VINF_SUCCESS)
2329 return rcStrict;
2330 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2331 }
2332
2333 pCtx->rip = uNewEip;
2334 pCtx->cs.Sel = uNewCs;
2335 pCtx->cs.ValidSel = uNewCs;
2336 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2337 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2338 pCtx->cs.u32Limit = cbLimitCS;
2339 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2340 pCtx->rsp = uNewESP;
2341 pCtx->ss.Sel = uNewSS;
2342 pCtx->ss.ValidSel = uNewSS;
2343 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2344 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2345 pCtx->ss.u32Limit = cbLimitSs;
2346 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2347
2348 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2349 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2350 if (enmEffOpSize != IEMMODE_16BIT)
2351 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2352 if (pIemCpu->uCpl == 0)
2353 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2354 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2355 fEFlagsMask |= X86_EFL_IF;
2356 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2357 fEFlagsNew &= ~fEFlagsMask;
2358 fEFlagsNew |= uNewFlags & fEFlagsMask;
2359 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2360
2361 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2362 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2363 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2364 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2365 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2366
2367 /* Done! */
2368
2369 }
2370 /*
2371 * Return to the same level.
2372 */
2373 else
2374 {
2375 /* Check EIP. */
2376 if (uNewEip > cbLimitCS)
2377 {
2378 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2379 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2380 }
2381
2382 /*
2383 * Commit the changes, marking CS first since it may fail.
2384 */
2385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2386 {
2387 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2388 if (rcStrict != VINF_SUCCESS)
2389 return rcStrict;
2390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2391 }
2392
2393 pCtx->rip = uNewEip;
2394 pCtx->cs.Sel = uNewCs;
2395 pCtx->cs.ValidSel = uNewCs;
2396 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2398 pCtx->cs.u32Limit = cbLimitCS;
2399 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2400 pCtx->rsp = uNewRsp;
2401
2402 X86EFLAGS NewEfl;
2403 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2404 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2405 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2406 if (enmEffOpSize != IEMMODE_16BIT)
2407 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2408 if (pIemCpu->uCpl == 0)
2409 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2410 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
2411 fEFlagsMask |= X86_EFL_IF;
2412 NewEfl.u &= ~fEFlagsMask;
2413 NewEfl.u |= fEFlagsMask & uNewFlags;
2414 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
2415 /* Done! */
2416 }
2417 return VINF_SUCCESS;
2418}
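
/*
 * Summary of the IRET frames handled above (lowest address first, words
 * instead of dwords for 16-bit operand size):
 *      return to the same privilege level:   EIP, CS, EFLAGS
 *      return to an outer privilege level:   EIP, CS, EFLAGS, ESP, SS
 */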
2419
2420
2421/**
2422 * Implements iret for long mode.
2423 *
2424 * @param enmEffOpSize The effective operand size.
2425 */
2426IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2427{
2428 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2429 NOREF(cbInstr);
2430
2431 /*
2432 * Nested task return is not supported in long mode.
2433 */
2434 if (pCtx->eflags.Bits.u1NT)
2435 {
2436 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
2437 return iemRaiseGeneralProtectionFault0(pIemCpu);
2438 }
2439
2440 /*
2441 * Normal return.
2442 *
2443 * Do the stack bits, but don't commit RSP before everything checks
2444 * out right.
2445 */
2446 VBOXSTRICTRC rcStrict;
2447 RTCPTRUNION uFrame;
2448 uint64_t uNewRip;
2449 uint16_t uNewCs;
2450 uint16_t uNewSs;
2451 uint32_t uNewFlags;
2452 uint64_t uNewRsp;
2453 if (enmEffOpSize == IEMMODE_64BIT)
2454 {
2455 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
2456 if (rcStrict != VINF_SUCCESS)
2457 return rcStrict;
2458 uNewRip = uFrame.pu64[0];
2459 uNewCs = (uint16_t)uFrame.pu64[1];
2460 uNewFlags = (uint32_t)uFrame.pu64[2];
2461 uNewRsp = uFrame.pu64[3];
2462 uNewSs = (uint16_t)uFrame.pu64[4];
2463 }
2464 else if (enmEffOpSize == IEMMODE_32BIT)
2465 {
2466 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
2467 if (rcStrict != VINF_SUCCESS)
2468 return rcStrict;
2469 uNewRip = uFrame.pu32[0];
2470 uNewCs = (uint16_t)uFrame.pu32[1];
2471 uNewFlags = uFrame.pu32[2];
2472 uNewRsp = uFrame.pu32[3];
2473 uNewSs = (uint16_t)uFrame.pu32[4];
2474 }
2475 else
2476 {
2477 Assert(enmEffOpSize == IEMMODE_16BIT);
2478 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
2479 if (rcStrict != VINF_SUCCESS)
2480 return rcStrict;
2481 uNewRip = uFrame.pu16[0];
2482 uNewCs = uFrame.pu16[1];
2483 uNewFlags = uFrame.pu16[2];
2484 uNewRsp = uFrame.pu16[3];
2485 uNewSs = uFrame.pu16[4];
2486 }
2487 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2488 if (rcStrict != VINF_SUCCESS)
2489 return rcStrict;
2490 Log2(("iretq stack: cs:rip=%04x:%016RX64 rflags=%08RX32 ss:rsp=%04x:%016RX64\n",
2491 uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
2492
2493 /*
2494 * Check stuff.
2495 */
2496 /* Read the CS descriptor. */
2497 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2498 {
2499 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2500 return iemRaiseGeneralProtectionFault0(pIemCpu);
2501 }
2502
2503 IEMSELDESC DescCS;
2504 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2505 if (rcStrict != VINF_SUCCESS)
2506 {
2507 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
2508 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
2509 return rcStrict;
2510 }
2511
2512 /* Must be a code descriptor. */
2513 if ( !DescCS.Legacy.Gen.u1DescType
2514 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2515 {
2516 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
2517 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2518 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2519 }
2520
2521 /* Privilege checks. */
2522 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
2523 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2524 {
2525 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl));
2526 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2527 }
2528 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2529 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2530 {
2531 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n",
2532 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl));
2533 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2534 }
2535
2536 /* Present? */
2537 if (!DescCS.Legacy.Gen.u1Present)
2538 {
2539 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2540 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2541 }
2542
2543 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2544
2545 /* Read the SS descriptor. */
2546 IEMSELDESC DescSS;
2547 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2548 {
2549 if ( !DescCS.Legacy.Gen.u1Long
2550 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
2551 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
2552 {
2553 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2554 return iemRaiseGeneralProtectionFault0(pIemCpu);
2555 }
2556 DescSS.Legacy.u = 0;
2557 }
2558 else
2559 {
2560 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
2564 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567 }
2568
2569 /* Privilege checks. */
2570 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2571 {
2572 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2573 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2574 }
2575
2576 uint32_t cbLimitSs;
2577 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2578 cbLimitSs = UINT32_MAX;
2579 else
2580 {
2581 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2582 {
2583 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
2584 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
2585 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2586 }
2587
2588 /* Must be a writeable data segment descriptor. */
2589 if (!DescSS.Legacy.Gen.u1DescType)
2590 {
2591 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
2592 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
2593 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2594 }
2595 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2596 {
2597 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
2598 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
2599 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2600 }
2601
2602 /* Present? */
2603 if (!DescSS.Legacy.Gen.u1Present)
2604 {
2605 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2606 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
2607 }
2608 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2609 }
2610
2611 /* Check EIP. */
2612 if (DescCS.Legacy.Gen.u1Long)
2613 {
2614 if (!IEM_IS_CANONICAL(uNewRip))
2615 {
2616 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
2617 uNewCs, uNewRip, uNewSs, uNewRsp));
2618 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2619 }
2620 }
2621 else
2622 {
2623 if (uNewRip > cbLimitCS)
2624 {
2625 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
2626 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
2627 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2628 }
2629 }
2630
2631 /*
2632 * Commit the changes, marking CS and SS accessed first since
2633 * that may fail.
2634 */
2635 /** @todo where exactly are these actually marked accessed by a real CPU? */
2636 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2637 {
2638 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2639 if (rcStrict != VINF_SUCCESS)
2640 return rcStrict;
2641 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2642 }
2643 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2644 {
2645 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
2646 if (rcStrict != VINF_SUCCESS)
2647 return rcStrict;
2648 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2649 }
2650
2651 pCtx->rip = uNewRip;
2652 pCtx->cs.Sel = uNewCs;
2653 pCtx->cs.ValidSel = uNewCs;
2654 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2655 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2656 pCtx->cs.u32Limit = cbLimitCS;
2657 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2658 pCtx->rsp = uNewRsp;
2659 pCtx->ss.Sel = uNewSs;
2660 pCtx->ss.ValidSel = uNewSs;
2661 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2662 {
2663 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2664 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
2665 pCtx->ss.u32Limit = UINT32_MAX;
2666 pCtx->ss.u64Base = 0;
2667 Log2(("iretq new SS: NULL\n"));
2668 }
2669 else
2670 {
2671 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2672 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2673 pCtx->ss.u32Limit = cbLimitSs;
2674 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2675 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
2676 }
2677
2678 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2679 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2680 if (enmEffOpSize != IEMMODE_16BIT)
2681 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2682 if (pIemCpu->uCpl == 0)
2683 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
2684 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2685 fEFlagsMask |= X86_EFL_IF;
2686 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2687 fEFlagsNew &= ~fEFlagsMask;
2688 fEFlagsNew |= uNewFlags & fEFlagsMask;
2689 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2690
2691 if (pIemCpu->uCpl != uNewCpl)
2692 {
2693 pIemCpu->uCpl = uNewCpl;
2694 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
2695 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
2696 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
2697 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
2698 }
2699
2700 return VINF_SUCCESS;
2701}
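
/*
 * Unlike the legacy forms, the long mode IRET frame handled above always
 * carries all five fields (lowest address first): RIP, CS, RFLAGS, RSP, SS,
 * each slot sized by the operand size (2, 4 or 8 bytes).
 */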
2702
2703
2704/**
2705 * Implements iret.
2706 *
2707 * @param enmEffOpSize The effective operand size.
2708 */
2709IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2710{
2711 /*
2712 * Call a mode specific worker.
2713 */
2714 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2715 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2716 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2717 if (IEM_IS_LONG_MODE(pIemCpu))
2718 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2719
2720 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2721}
2722
2723
2724/**
2725 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2726 *
2727 * @param iSegReg The segment register number (valid).
2728 * @param uSel The new selector value.
2729 */
2730IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2731{
2732 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2733 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2734 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2735
2736 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2737
2738 /*
2739 * Real mode and V8086 mode are easy.
2740 */
2741 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2742 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2743 {
2744 *pSel = uSel;
2745 pHid->u64Base = (uint32_t)uSel << 4;
2746 pHid->ValidSel = uSel;
2747 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2748#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2749 /** @todo Does the CPU actually load limits and attributes in the
2750 * real/V8086 mode segment load case? It doesn't for CS in far
2751 * jumps... Affects unreal mode. */
2752 pHid->u32Limit = 0xffff;
2753 pHid->Attr.u = 0;
2754 pHid->Attr.n.u1Present = 1;
2755 pHid->Attr.n.u1DescType = 1;
2756 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2757 ? X86_SEL_TYPE_RW
2758 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2759#endif
2760 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2761 iemRegAddToRip(pIemCpu, cbInstr);
2762 return VINF_SUCCESS;
2763 }
2764
2765 /*
2766 * Protected mode.
2767 *
2768 * Check if it's a null segment selector value first, that's OK for DS, ES,
2769 * FS and GS. If not null, then we have to load and parse the descriptor.
2770 */
2771 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2772 {
2773 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
2774 if (iSegReg == X86_SREG_SS)
2775 {
2776 /* In 64-bit kernel mode, the stack can be 0 because of the way
2777 interrupts are dispatched. AMD seems to have a slightly more
2778 relaxed relationship to SS.RPL than Intel does. */
2779 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
2780 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2781 || pIemCpu->uCpl > 2
2782 || ( uSel != pIemCpu->uCpl
2783 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
2784 {
2785 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2786 return iemRaiseGeneralProtectionFault0(pIemCpu);
2787 }
2788 }
2789
2790 *pSel = uSel; /* Not RPL, remember :-) */
2791 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2792 if (iSegReg == X86_SREG_SS)
2793 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
2794
2795 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2796 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2797
2798 iemRegAddToRip(pIemCpu, cbInstr);
2799 return VINF_SUCCESS;
2800 }
2801
2802 /* Fetch the descriptor. */
2803 IEMSELDESC Desc;
2804 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2805 if (rcStrict != VINF_SUCCESS)
2806 return rcStrict;
2807
2808 /* Check GPs first. */
2809 if (!Desc.Legacy.Gen.u1DescType)
2810 {
2811 Log(("load sreg %d, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2812 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2813 }
2814 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2815 {
2816 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2817 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2818 {
2819 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2820 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2821 }
2822 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2823 {
2824 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2825 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2826 }
2827 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2828 {
2829 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2830 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2831 }
2832 }
2833 else
2834 {
2835 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2836 {
2837 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2838 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2839 }
2840 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2841 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2842 {
2843#if 0 /* this is what intel says. */
2844 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2845 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2846 {
2847 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2848 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2849 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2850 }
2851#else /* this is what makes more sense. */
2852 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2853 {
2854 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2855 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2856 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2857 }
2858 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2859 {
2860 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2861 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2862 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2863 }
2864#endif
2865 }
2866 }
2867
2868 /* Is it there? */
2869 if (!Desc.Legacy.Gen.u1Present)
2870 {
2871 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2872 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2873 }
2874
2875 /* The base and limit. */
2876 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2877 uint64_t u64Base;
2878 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2879 && iSegReg < X86_SREG_FS)
2880 u64Base = 0;
2881 else
2882 u64Base = X86DESC_BASE(&Desc.Legacy);
2883
2884 /*
2885 * Ok, everything checked out fine. Now set the accessed bit before
2886 * committing the result into the registers.
2887 */
2888 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2889 {
2890 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2891 if (rcStrict != VINF_SUCCESS)
2892 return rcStrict;
2893 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2894 }
2895
2896 /* commit */
2897 *pSel = uSel;
2898 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2899 pHid->u32Limit = cbLimit;
2900 pHid->u64Base = u64Base;
2901 pHid->ValidSel = uSel;
2902 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2903
2904 /** @todo check if the hidden bits are loaded correctly for 64-bit
2905 * mode. */
2906 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2907
2908 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2909 iemRegAddToRip(pIemCpu, cbInstr);
2910 return VINF_SUCCESS;
2911}
2912
2913
2914/**
2915 * Implements 'mov SReg, r/m'.
2916 *
2917 * @param iSegReg The segment register number (valid).
2918 * @param uSel The new selector value.
2919 */
2920IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2921{
2922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2923 if (rcStrict == VINF_SUCCESS)
2924 {
2925 if (iSegReg == X86_SREG_SS)
2926 {
2927 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2928 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2929 }
2930 }
2931 return rcStrict;
2932}
2933
2934
2935/**
2936 * Implements 'pop SReg'.
2937 *
2938 * @param iSegReg The segment register number (valid).
2939 * @param enmEffOpSize The effective operand size (valid).
2940 */
2941IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2942{
2943 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2944 VBOXSTRICTRC rcStrict;
2945
2946 /*
2947 * Read the selector off the stack and join paths with mov ss, reg.
2948 */
2949 RTUINT64U TmpRsp;
2950 TmpRsp.u = pCtx->rsp;
2951 switch (enmEffOpSize)
2952 {
2953 case IEMMODE_16BIT:
2954 {
2955 uint16_t uSel;
2956 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2957 if (rcStrict == VINF_SUCCESS)
2958 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2959 break;
2960 }
2961
2962 case IEMMODE_32BIT:
2963 {
2964 uint32_t u32Value;
2965 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2966 if (rcStrict == VINF_SUCCESS)
2967 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2968 break;
2969 }
2970
2971 case IEMMODE_64BIT:
2972 {
2973 uint64_t u64Value;
2974 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2975 if (rcStrict == VINF_SUCCESS)
2976 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2977 break;
2978 }
2979 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2980 }
2981
2982 /*
2983 * Commit the stack on success.
2984 */
2985 if (rcStrict == VINF_SUCCESS)
2986 {
2987 pCtx->rsp = TmpRsp.u;
2988 if (iSegReg == X86_SREG_SS)
2989 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2990 }
2991 return rcStrict;
2992}
2993
2994
2995/**
2996 * Implements lgs, lfs, les, lds & lss.
2997 */
2998IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2999 uint16_t, uSel,
3000 uint64_t, offSeg,
3001 uint8_t, iSegReg,
3002 uint8_t, iGReg,
3003 IEMMODE, enmEffOpSize)
3004{
3005 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3006 VBOXSTRICTRC rcStrict;
3007
3008 /*
3009 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
3010 */
3011 /** @todo verify and test that mov, pop and lXs perform the segment
3012 * register loading in the exact same way. */
3013 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 switch (enmEffOpSize)
3017 {
3018 case IEMMODE_16BIT:
3019 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3020 break;
3021 case IEMMODE_32BIT:
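 /* Stored as 64 bits so the high half of the register ends up zero,
 matching the zero-extending 32-bit GPR write semantics of 64-bit
 mode (harmless in the other modes). */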
3022 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3023 break;
3024 case IEMMODE_64BIT:
3025 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3026 break;
3027 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3028 }
3029 }
3030
3031 return rcStrict;
3032}
3033
3034
3035/**
3036 * Implements lgdt.
3037 *
3038 * @param iEffSeg The segment of the new gdtr contents.
3039 * @param GCPtrEffSrc The address of the new gdtr contents.
3040 * @param enmEffOpSize The effective operand size.
3041 */
3042IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
3043{
3044 if (pIemCpu->uCpl != 0)
3045 return iemRaiseGeneralProtectionFault0(pIemCpu);
3046 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3047
3048 /*
3049 * Fetch the limit and base address.
3050 */
3051 uint16_t cbLimit;
3052 RTGCPTR GCPtrBase;
3053 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
3054 if (rcStrict == VINF_SUCCESS)
3055 {
3056 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3057 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
3058 else
3059 {
3060 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3061 pCtx->gdtr.cbGdt = cbLimit;
3062 pCtx->gdtr.pGdt = GCPtrBase;
3063 }
3064 if (rcStrict == VINF_SUCCESS)
3065 iemRegAddToRip(pIemCpu, cbInstr);
3066 }
3067 return rcStrict;
3068}
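
/*
 * The memory operand fetched above is the usual pseudo-descriptor: a 16-bit
 * limit followed by the base address (32 bits in legacy mode, 64 bits in
 * long mode).
 */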
3069
3070
3071/**
3072 * Implements sgdt.
3073 *
3074 * @param iEffSeg The segment where to store the gdtr content.
3075 * @param GCPtrEffDst The address where to store the gdtr content.
3076 * @param enmEffOpSize The effective operand size.
3077 */
3078IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
3079{
3080 /*
3081 * Join paths with sidt.
3082 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
3083 * you really must know.
3084 */
3085 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3086 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
3087 if (rcStrict == VINF_SUCCESS)
3088 iemRegAddToRip(pIemCpu, cbInstr);
3089 return rcStrict;
3090}
3091
3092
3093/**
3094 * Implements lidt.
3095 *
3096 * @param iEffSeg The segment of the new idtr contents.
3097 * @param GCPtrEffSrc The address of the new idtr contents.
3098 * @param enmEffOpSize The effective operand size.
3099 */
3100IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
3101{
3102 if (pIemCpu->uCpl != 0)
3103 return iemRaiseGeneralProtectionFault0(pIemCpu);
3104 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3105
3106 /*
3107 * Fetch the limit and base address.
3108 */
3109 uint16_t cbLimit;
3110 RTGCPTR GCPtrBase;
3111 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
3112 if (rcStrict == VINF_SUCCESS)
3113 {
3114 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3115 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
3116 else
3117 {
3118 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3119 pCtx->idtr.cbIdt = cbLimit;
3120 pCtx->idtr.pIdt = GCPtrBase;
3121 }
3122 iemRegAddToRip(pIemCpu, cbInstr);
3123 }
3124 return rcStrict;
3125}
3126
3127
3128/**
3129 * Implements sidt.
3130 *
3131 * @param iEffSeg The segment where to store the idtr content.
3132 * @param GCPtrEffDst The address where to store the idtr content.
3133 * @param enmEffOpSize The effective operand size.
3134 */
3135IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
3136{
3137 /*
3138 * Join paths with sgdt.
3139 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
3140 * you really must know.
3141 */
3142 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3143 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
3144 if (rcStrict == VINF_SUCCESS)
3145 iemRegAddToRip(pIemCpu, cbInstr);
3146 return rcStrict;
3147}
3148
3149
3150/**
3151 * Implements lldt.
3152 *
3153 * @param uNewLdt The new LDT selector value.
3154 */
3155IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
3156{
3157 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3158
3159 /*
3160 * Check preconditions.
3161 */
3162 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3163 {
3164 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
3165 return iemRaiseUndefinedOpcode(pIemCpu);
3166 }
3167 if (pIemCpu->uCpl != 0)
3168 {
3169 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
3170 return iemRaiseGeneralProtectionFault0(pIemCpu);
3171 }
3172 if (uNewLdt & X86_SEL_LDT)
3173 {
3174 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
3175 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
3176 }
3177
3178 /*
3179 * Now, loading a NULL selector is easy.
3180 */
3181 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3182 {
3183 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
3184 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3185 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
3186 else
3187 pCtx->ldtr.Sel = uNewLdt;
3188 pCtx->ldtr.ValidSel = uNewLdt;
3189 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3190 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
3191 if (!IEM_IS_GUEST_CPU_AMD(pIemCpu) || !IEM_VERIFICATION_ENABLED(pIemCpu)) /* See bs-cpu-hidden-regs-1 on AMD. */
3192 {
3193 pCtx->ldtr.u64Base = 0;
3194 pCtx->ldtr.u32Limit = 0;
3195 }
3196
3197 iemRegAddToRip(pIemCpu, cbInstr);
3198 return VINF_SUCCESS;
3199 }
3200
3201 /*
3202 * Read the descriptor.
3203 */
3204 IEMSELDESC Desc;
3205 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
3206 if (rcStrict != VINF_SUCCESS)
3207 return rcStrict;
3208
3209 /* Check GPs first. */
3210 if (Desc.Legacy.Gen.u1DescType)
3211 {
3212 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
3213 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3214 }
3215 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3216 {
3217 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
3218 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3219 }
3220 uint64_t u64Base;
3221 if (!IEM_IS_LONG_MODE(pIemCpu))
3222 u64Base = X86DESC_BASE(&Desc.Legacy);
3223 else
3224 {
3225 if (Desc.Long.Gen.u5Zeros)
3226 {
3227 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
3228 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3229 }
3230
3231 u64Base = X86DESC64_BASE(&Desc.Long);
3232 if (!IEM_IS_CANONICAL(u64Base))
3233 {
3234 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
3235 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3236 }
3237 }
3238
3239 /* NP */
3240 if (!Desc.Legacy.Gen.u1Present)
3241 {
3242 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
3243 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
3244 }
3245
3246 /*
3247 * It checks out alright, update the registers.
3248 */
3249/** @todo check if the actual value is loaded or if the RPL is dropped */
3250 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3251 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
3252 else
3253 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3254 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3255 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3257 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3258 pCtx->ldtr.u64Base = u64Base;
3259
3260 iemRegAddToRip(pIemCpu, cbInstr);
3261 return VINF_SUCCESS;
3262}
3263
3264
3265/**
3266 * Implements ltr.
3267 *
3268 * @param uNewTr The new TR selector value.
3269 */
3270IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
3271{
3272 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3273
3274 /*
3275 * Check preconditions.
3276 */
3277 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3278 {
3279 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
3280 return iemRaiseUndefinedOpcode(pIemCpu);
3281 }
3282 if (pIemCpu->uCpl != 0)
3283 {
3284 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
3285 return iemRaiseGeneralProtectionFault0(pIemCpu);
3286 }
3287 if (uNewTr & X86_SEL_LDT)
3288 {
3289 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
3290 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
3291 }
3292 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
3293 {
3294 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
3295 return iemRaiseGeneralProtectionFault0(pIemCpu);
3296 }
3297
3298 /*
3299 * Read the descriptor.
3300 */
3301 IEMSELDESC Desc;
3302 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
3303 if (rcStrict != VINF_SUCCESS)
3304 return rcStrict;
3305
3306 /* Check GPs first. */
3307 if (Desc.Legacy.Gen.u1DescType)
3308 {
3309 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3310 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3311 }
3312 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
3313 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3314 || IEM_IS_LONG_MODE(pIemCpu)) )
3315 {
3316 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3317 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3318 }
3319 uint64_t u64Base;
3320 if (!IEM_IS_LONG_MODE(pIemCpu))
3321 u64Base = X86DESC_BASE(&Desc.Legacy);
3322 else
3323 {
3324 if (Desc.Long.Gen.u5Zeros)
3325 {
3326 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
3327 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3328 }
3329
3330 u64Base = X86DESC64_BASE(&Desc.Long);
3331 if (!IEM_IS_CANONICAL(u64Base))
3332 {
3333 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
3334 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3335 }
3336 }
3337
3338 /* NP */
3339 if (!Desc.Legacy.Gen.u1Present)
3340 {
3341 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3342 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3343 }
3344
3345 /*
3346 * Set it busy.
3347 * Note! Intel says this should lock down the whole descriptor, but we'll
3348 * restrict ourselves to 32-bit for now due to lack of inline
3349 * assembly and such.
3350 */
3351 void *pvDesc;
3352 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3353 if (rcStrict != VINF_SUCCESS)
3354 return rcStrict;
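 /* The TSS busy flag is bit 41 of the descriptor (bit 1 of the type field
 in byte 5); the switch below picks a dword aligned base for the atomic
 bit operation and adjusts the bit offset accordingly. */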
3355 switch ((uintptr_t)pvDesc & 3)
3356 {
3357 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3358 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3359 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3360 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
3361 }
3362 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3363 if (rcStrict != VINF_SUCCESS)
3364 return rcStrict;
3365 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3366
3367 /*
3368 * It checks out alright, update the registers.
3369 */
3370/** @todo check if the actual value is loaded or if the RPL is dropped */
3371 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3372 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3373 else
3374 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3375 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3376 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3377 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3378 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3379 pCtx->tr.u64Base = u64Base;
3380
3381 iemRegAddToRip(pIemCpu, cbInstr);
3382 return VINF_SUCCESS;
3383}
3384
3385
3386/**
3387 * Implements mov GReg,CRx.
3388 *
3389 * @param iGReg The general register to store the CRx value in.
3390 * @param iCrReg The CRx register to read (valid).
3391 */
3392IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3393{
3394 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3395 if (pIemCpu->uCpl != 0)
3396 return iemRaiseGeneralProtectionFault0(pIemCpu);
3397 Assert(!pCtx->eflags.Bits.u1VM);
3398
3399 /* read it */
3400 uint64_t crX;
3401 switch (iCrReg)
3402 {
3403 case 0: crX = pCtx->cr0; break;
3404 case 2: crX = pCtx->cr2; break;
3405 case 3: crX = pCtx->cr3; break;
3406 case 4: crX = pCtx->cr4; break;
3407 case 8:
3408 {
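 /* CR8 reads back the upper four bits of the local APIC TPR. */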
3409 uint8_t uTpr;
3410 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
3411 if (RT_SUCCESS(rc))
3412 crX = uTpr >> 4;
3413 else
3414 crX = 0;
3415 break;
3416 }
3417 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3418 }
3419
3420 /* store it */
3421 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3422 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3423 else
3424 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3425
3426 iemRegAddToRip(pIemCpu, cbInstr);
3427 return VINF_SUCCESS;
3428}
3429
3430
3431/**
3432 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3433 *
3434 * @param iCrReg The CRx register to write (valid).
3435 * @param uNewCrX The new value.
3436 */
3437IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3438{
3439 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3440 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3441 VBOXSTRICTRC rcStrict;
3442 int rc;
3443
3444 /*
3445 * Try to store it.
3446 * Unfortunately, CPUM only does a tiny bit of the work.
3447 */
3448 switch (iCrReg)
3449 {
3450 case 0:
3451 {
3452 /*
3453 * Perform checks.
3454 */
3455 uint64_t const uOldCrX = pCtx->cr0;
3456 uNewCrX |= X86_CR0_ET; /* hardcoded */
3457
3458 /* Check for reserved bits. */
3459 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3460 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3461 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3462 if (uNewCrX & ~(uint64_t)fValid)
3463 {
3464 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3465 return iemRaiseGeneralProtectionFault0(pIemCpu);
3466 }
3467
3468 /* Check for invalid combinations. */
3469 if ( (uNewCrX & X86_CR0_PG)
3470 && !(uNewCrX & X86_CR0_PE) )
3471 {
3472 Log(("Trying to set CR0.PG without CR0.PE\n"));
3473 return iemRaiseGeneralProtectionFault0(pIemCpu);
3474 }
3475
3476 if ( !(uNewCrX & X86_CR0_CD)
3477 && (uNewCrX & X86_CR0_NW) )
3478 {
3479 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3480 return iemRaiseGeneralProtectionFault0(pIemCpu);
3481 }
3482
3483 /* Long mode consistency checks. */
3484 if ( (uNewCrX & X86_CR0_PG)
3485 && !(uOldCrX & X86_CR0_PG)
3486 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3487 {
3488 if (!(pCtx->cr4 & X86_CR4_PAE))
3489 {
3490 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
3491 return iemRaiseGeneralProtectionFault0(pIemCpu);
3492 }
3493 if (pCtx->cs.Attr.n.u1Long)
3494 {
3495 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
3496 return iemRaiseGeneralProtectionFault0(pIemCpu);
3497 }
3498 }
3499
3500 /** @todo check reserved PDPTR bits as AMD states. */
3501
3502 /*
3503 * Change CR0.
3504 */
3505 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3506 CPUMSetGuestCR0(pVCpu, uNewCrX);
3507 else
3508 pCtx->cr0 = uNewCrX;
3509 Assert(pCtx->cr0 == uNewCrX);
3510
3511 /*
3512 * Change EFER.LMA if entering or leaving long mode.
3513 */
3514 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3515 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3516 {
3517 uint64_t NewEFER = pCtx->msrEFER;
3518 if (uNewCrX & X86_CR0_PG)
3519 NewEFER |= MSR_K6_EFER_LMA;
3520 else
3521 NewEFER &= ~MSR_K6_EFER_LMA;
3522
3523 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3524 CPUMSetGuestEFER(pVCpu, NewEFER);
3525 else
3526 pCtx->msrEFER = NewEFER;
3527 Assert(pCtx->msrEFER == NewEFER);
3528 }
3529
3530 /*
3531 * Inform PGM.
3532 */
3533 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3534 {
3535 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3536 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3537 {
3538 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3539 AssertRCReturn(rc, rc);
3540 /* ignore informational status codes */
3541 }
3542 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3543 }
3544 else
3545 rcStrict = VINF_SUCCESS;
3546
3547#ifdef IN_RC
3548 /* Return to ring-3 for rescheduling if WP or AM changes. */
3549 if ( rcStrict == VINF_SUCCESS
3550 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
3551 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
3552 rcStrict = VINF_EM_RESCHEDULE;
3553#endif
3554 break;
3555 }
3556
3557 /*
3558 * CR2 can be changed without any restrictions.
3559 */
3560 case 2:
3561 pCtx->cr2 = uNewCrX;
3562 rcStrict = VINF_SUCCESS;
3563 break;
3564
3565 /*
3566 * CR3 is relatively simple, although AMD and Intel have different
3567 * accounts of how setting reserved bits is handled. We take Intel's
3568 * word for the lower bits and AMD's for the high bits (63:52).
3569 */
3570 /** @todo Testcase: Setting reserved bits in CR3, especially before
3571 * enabling paging. */
3572 case 3:
3573 {
3574 /* check / mask the value. */
3575 if (uNewCrX & UINT64_C(0xfff0000000000000))
3576 {
3577 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3578 return iemRaiseGeneralProtectionFault0(pIemCpu);
3579 }
3580
3581 uint64_t fValid;
3582 if ( (pCtx->cr4 & X86_CR4_PAE)
3583 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3584 fValid = UINT64_C(0x000ffffffffff014);
3585 else if (pCtx->cr4 & X86_CR4_PAE)
3586 fValid = UINT64_C(0xfffffff4);
3587 else
3588 fValid = UINT64_C(0xfffff014);
3589 if (uNewCrX & ~fValid)
3590 {
3591 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3592 uNewCrX, uNewCrX & ~fValid));
3593 uNewCrX &= fValid;
3594 }
3595
3596 /** @todo If we're in PAE mode we should check the PDPTRs for
3597 * invalid bits. */
3598
3599 /* Make the change. */
3600 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3601 {
3602 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3603 AssertRCSuccessReturn(rc, rc);
3604 }
3605 else
3606 pCtx->cr3 = uNewCrX;
3607
3608 /* Inform PGM. */
3609 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3610 {
3611 if (pCtx->cr0 & X86_CR0_PG)
3612 {
3613 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3614 AssertRCReturn(rc, rc);
3615 /* ignore informational status codes */
3616 }
3617 }
3618 rcStrict = VINF_SUCCESS;
3619 break;
3620 }
3621
3622 /*
3623 * CR4 is a bit more tedious as there are bits which cannot be cleared
3624 * under some circumstances and such.
3625 */
3626 case 4:
3627 {
3628 uint64_t const uOldCrX = pCtx->cr4;
3629
3630 /* reserved bits */
3631 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3632 | X86_CR4_TSD | X86_CR4_DE
3633 | X86_CR4_PSE | X86_CR4_PAE
3634 | X86_CR4_MCE | X86_CR4_PGE
3635 | X86_CR4_PCE | X86_CR4_OSFSXR
3636 | X86_CR4_OSXMMEEXCPT;
3637 //if (xxx)
3638 // fValid |= X86_CR4_VMXE;
3639 //if (xxx)
3640 // fValid |= X86_CR4_OSXSAVE;
3641 if (uNewCrX & ~(uint64_t)fValid)
3642 {
3643 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3644 return iemRaiseGeneralProtectionFault0(pIemCpu);
3645 }
3646
3647 /* long mode checks. */
3648 if ( (uOldCrX & X86_CR4_PAE)
3649 && !(uNewCrX & X86_CR4_PAE)
3650 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3651 {
3652 Log(("Trying to clear CR4.PAE while long mode is active\n"));
3653 return iemRaiseGeneralProtectionFault0(pIemCpu);
3654 }
3655
3656
3657 /*
3658 * Change it.
3659 */
3660 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3661 {
3662 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3663 AssertRCSuccessReturn(rc, rc);
3664 }
3665 else
3666 pCtx->cr4 = uNewCrX;
3667 Assert(pCtx->cr4 == uNewCrX);
3668
3669 /*
3670 * Notify SELM and PGM.
3671 */
3672 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3673 {
3674 /* SELM - VME may change things wrt to the TSS shadowing. */
3675 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3676 {
3677 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3678 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3679#ifdef VBOX_WITH_RAW_MODE
3680 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
3681 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3682#endif
3683 }
3684
3685 /* PGM - flushing and mode. */
3686 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3687 {
3688 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3689 AssertRCReturn(rc, rc);
3690 /* ignore informational status codes */
3691 }
3692 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3693 }
3694 else
3695 rcStrict = VINF_SUCCESS;
3696 break;
3697 }
3698
3699 /*
3700 * CR8 maps to the APIC TPR.
3701 */
3702 case 8:
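        /* Only bits 3:0 of CR8 are defined; they are written to TPR bits 7:4 below. */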
3703 if (uNewCrX & ~(uint64_t)0xf)
3704 {
3705 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
3706 return iemRaiseGeneralProtectionFault0(pIemCpu);
3707 }
3708
3709 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3710 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
3711 rcStrict = VINF_SUCCESS;
3712 break;
3713
3714 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3715 }
3716
3717 /*
3718 * Advance the RIP on success.
3719 */
3720 if (RT_SUCCESS(rcStrict))
3721 {
3722 if (rcStrict != VINF_SUCCESS)
3723 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3724 iemRegAddToRip(pIemCpu, cbInstr);
3725 }
3726
3727 return rcStrict;
3728}
3729
3730
3731/**
3732 * Implements mov CRx,GReg.
3733 *
3734 * @param iCrReg The CRx register to write (valid).
3735 * @param iGReg The general register to load the CRx value from.
3736 */
3737IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3738{
3739 if (pIemCpu->uCpl != 0)
3740 return iemRaiseGeneralProtectionFault0(pIemCpu);
3741 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3742
3743 /*
3744 * Read the new value from the source register and call common worker.
3745 */
3746 uint64_t uNewCrX;
3747 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3748 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3749 else
3750 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3751 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3752}
3753
3754
3755/**
3756 * Implements 'LMSW r/m16'
3757 *
3758 * @param u16NewMsw The new value.
3759 */
3760IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3761{
3762 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3763
3764 if (pIemCpu->uCpl != 0)
3765 return iemRaiseGeneralProtectionFault0(pIemCpu);
3766 Assert(!pCtx->eflags.Bits.u1VM);
3767
3768 /*
3769 * Compose the new CR0 value and call common worker.
3770 */
3771 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3772 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
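    /* Note that PE is not part of the clear mask above, so LMSW can set CR0.PE but
       never clear it, which matches the architected behaviour. */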
3773 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3774}
3775
3776
3777/**
3778 * Implements 'CLTS'.
3779 */
3780IEM_CIMPL_DEF_0(iemCImpl_clts)
3781{
3782 if (pIemCpu->uCpl != 0)
3783 return iemRaiseGeneralProtectionFault0(pIemCpu);
3784
3785 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3786 uint64_t uNewCr0 = pCtx->cr0;
3787 uNewCr0 &= ~X86_CR0_TS;
3788 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3789}
3790
3791
3792/**
3793 * Implements mov GReg,DRx.
3794 *
3795 * @param iGReg The general register to store the DRx value in.
3796 * @param iDrReg The DRx register to read (0-7).
3797 */
3798IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3799{
3800 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3801
3802 /*
3803 * Check preconditions.
3804 */
3805
3806 /* Raise GPs. */
3807 if (pIemCpu->uCpl != 0)
3808 return iemRaiseGeneralProtectionFault0(pIemCpu);
3809 Assert(!pCtx->eflags.Bits.u1VM);
3810
3811 if ( (iDrReg == 4 || iDrReg == 5)
3812 && (pCtx->cr4 & X86_CR4_DE) )
3813 {
3814 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3815 return iemRaiseGeneralProtectionFault0(pIemCpu);
3816 }
3817
3818 /* Raise #DB if general access detect is enabled. */
3819 if (pCtx->dr[7] & X86_DR7_GD)
3820 {
3821 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3822 return iemRaiseDebugException(pIemCpu);
3823 }
3824
3825 /*
3826 * Read the debug register and store it in the specified general register.
3827 */
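    /* DR4/DR5 alias DR6/DR7 here since CR4.DE was checked above; reserved bits in
       DR6/DR7 read back as the fixed values forced below. */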
3828 uint64_t drX;
3829 switch (iDrReg)
3830 {
3831 case 0: drX = pCtx->dr[0]; break;
3832 case 1: drX = pCtx->dr[1]; break;
3833 case 2: drX = pCtx->dr[2]; break;
3834 case 3: drX = pCtx->dr[3]; break;
3835 case 6:
3836 case 4:
3837 drX = pCtx->dr[6];
3838 drX &= ~RT_BIT_32(12);
3839 drX |= UINT32_C(0xffff0ff0);
3840 break;
3841 case 7:
3842 case 5:
3843 drX = pCtx->dr[7];
3844 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3845 drX |= RT_BIT_32(10);
3846 break;
3847 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3848 }
3849
3850 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3851 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3852 else
3853 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3854
3855 iemRegAddToRip(pIemCpu, cbInstr);
3856 return VINF_SUCCESS;
3857}
3858
3859
3860/**
3861 * Implements mov DRx,GReg.
3862 *
3863 * @param iDrReg The DRx register to write (valid).
3864 * @param iGReg The general register to load the DRx value from.
3865 */
3866IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3867{
3868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3869
3870 /*
3871 * Check preconditions.
3872 */
3873 if (pIemCpu->uCpl != 0)
3874 return iemRaiseGeneralProtectionFault0(pIemCpu);
3875 Assert(!pCtx->eflags.Bits.u1VM);
3876
3877 if ( (iDrReg == 4 || iDrReg == 5)
3878 && (pCtx->cr4 & X86_CR4_DE) )
3879 {
3880 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3881 return iemRaiseGeneralProtectionFault0(pIemCpu);
3882 }
3883
3884 /* Raise #DB if general access detect is enabled. */
3885 /** @todo Is the \#DB for DR7.GD raised before the \#GP for setting reserved
3886 * high bits in DR7/DR6? */
3887 if (pCtx->dr[7] & X86_DR7_GD)
3888 {
3889 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3890 return iemRaiseDebugException(pIemCpu);
3891 }
3892
3893 /*
3894 * Read the new value from the source register.
3895 */
3896 uint64_t uNewDrX;
3897 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3898 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3899 else
3900 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3901
3902 /*
3903 * Adjust it.
3904 */
3905 switch (iDrReg)
3906 {
3907 case 0:
3908 case 1:
3909 case 2:
3910 case 3:
3911 /* nothing to adjust */
3912 break;
3913
3914 case 6:
3915 case 4:
3916 if (uNewDrX & UINT64_C(0xffffffff00000000))
3917 {
3918 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3919 return iemRaiseGeneralProtectionFault0(pIemCpu);
3920 }
3921 uNewDrX &= ~RT_BIT_32(12);
3922 uNewDrX |= UINT32_C(0xffff0ff0);
3923 break;
3924
3925 case 7:
3926 case 5:
3927 if (uNewDrX & UINT64_C(0xffffffff00000000))
3928 {
3929 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3930 return iemRaiseGeneralProtectionFault0(pIemCpu);
3931 }
3932 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3933 uNewDrX |= RT_BIT_32(10);
3934 break;
3935
3936 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3937 }
3938
3939 /*
3940 * Do the actual setting.
3941 */
3942 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3943 {
3944 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3945 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3946 }
3947 else
3948 pCtx->dr[iDrReg] = uNewDrX;
3949
3950 iemRegAddToRip(pIemCpu, cbInstr);
3951 return VINF_SUCCESS;
3952}
3953
3954
3955/**
3956 * Implements 'INVLPG m'.
3957 *
3958 * @param GCPtrPage The effective address of the page to invalidate.
3959 * @remarks Updates the RIP.
3960 */
3961 IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3962{
3963 /* ring-0 only. */
3964 if (pIemCpu->uCpl != 0)
3965 return iemRaiseGeneralProtectionFault0(pIemCpu);
3966 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3967
3968 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3969 iemRegAddToRip(pIemCpu, cbInstr);
3970
3971 if (rc == VINF_SUCCESS)
3972 return VINF_SUCCESS;
3973 if (rc == VINF_PGM_SYNC_CR3)
3974 return iemSetPassUpStatus(pIemCpu, rc);
3975
3976 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3977 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3978 return rc;
3979}
3980
3981
3982/**
3983 * Implements RDTSC.
3984 */
3985IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3986{
3987 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3988
3989 /*
3990 * Check preconditions.
3991 */
3992 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3993 return iemRaiseUndefinedOpcode(pIemCpu);
3994
3995 if ( (pCtx->cr4 & X86_CR4_TSD)
3996 && pIemCpu->uCpl != 0)
3997 {
3998 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3999 return iemRaiseGeneralProtectionFault0(pIemCpu);
4000 }
4001
4002 /*
4003 * Do the job.
4004 */
4005 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
4006 pCtx->rax = (uint32_t)uTicks;
4007 pCtx->rdx = uTicks >> 32;
4008#ifdef IEM_VERIFICATION_MODE_FULL
4009 pIemCpu->fIgnoreRaxRdx = true;
4010#endif
4011
4012 iemRegAddToRip(pIemCpu, cbInstr);
4013 return VINF_SUCCESS;
4014}
4015
4016
4017/**
4018 * Implements RDMSR.
4019 */
4020IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
4021{
4022 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4023
4024 /*
4025 * Check preconditions.
4026 */
4027 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
4028 return iemRaiseUndefinedOpcode(pIemCpu);
4029 if (pIemCpu->uCpl != 0)
4030 return iemRaiseGeneralProtectionFault0(pIemCpu);
4031
4032 /*
4033 * Do the job.
4034 */
4035 RTUINT64U uValue;
4036 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
4037 if (rc != VINF_SUCCESS)
4038 {
4039 Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx));
4040 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
4041 return iemRaiseGeneralProtectionFault0(pIemCpu);
4042 }
4043
4044 pCtx->rax = uValue.s.Lo;
4045 pCtx->rdx = uValue.s.Hi;
4046
4047 iemRegAddToRip(pIemCpu, cbInstr);
4048 return VINF_SUCCESS;
4049}
4050
4051
4052/**
4053 * Implements WRMSR.
4054 */
4055IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
4056{
4057 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4058
4059 /*
4060 * Check preconditions.
4061 */
4062 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
4063 return iemRaiseUndefinedOpcode(pIemCpu);
4064 if (pIemCpu->uCpl != 0)
4065 return iemRaiseGeneralProtectionFault0(pIemCpu);
4066
4067 /*
4068 * Do the job.
4069 */
4070 RTUINT64U uValue;
4071 uValue.s.Lo = pCtx->eax;
4072 uValue.s.Hi = pCtx->edx;
4073
4074 int rc;
4075 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4076 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
4077 else
4078 {
4079 CPUMCTX CtxTmp = *pCtx;
4080 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
4081 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
4082 *pCtx = *pCtx2;
4083 *pCtx2 = CtxTmp;
4084 }
4085 if (rc != VINF_SUCCESS)
4086 {
4087 Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
4088 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
4089 return iemRaiseGeneralProtectionFault0(pIemCpu);
4090 }
4091
4092 iemRegAddToRip(pIemCpu, cbInstr);
4093 return VINF_SUCCESS;
4094}
4095
4096
4097/**
4098 * Implements 'IN eAX, port'.
4099 *
4100 * @param u16Port The source port.
4101 * @param cbReg The register size.
4102 */
4103IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
4104{
4105 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4106
4107 /*
4108 * CPL check
4109 */
4110 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
4111 if (rcStrict != VINF_SUCCESS)
4112 return rcStrict;
4113
4114 /*
4115 * Perform the I/O.
4116 */
4117 uint32_t u32Value;
4118 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4119 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
4120 else
4121 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
4122 if (IOM_SUCCESS(rcStrict))
4123 {
4124 switch (cbReg)
4125 {
4126 case 1: pCtx->al = (uint8_t)u32Value; break;
4127 case 2: pCtx->ax = (uint16_t)u32Value; break;
4128 case 4: pCtx->rax = u32Value; break;
4129 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4130 }
4131 iemRegAddToRip(pIemCpu, cbInstr);
4132 pIemCpu->cPotentialExits++;
4133 if (rcStrict != VINF_SUCCESS)
4134 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
4135 }
4136
4137 return rcStrict;
4138}
4139
4140
4141/**
4142 * Implements 'IN eAX, DX'.
4143 *
4144 * @param cbReg The register size.
4145 */
4146IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
4147{
4148 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
4149}
4150
4151
4152/**
4153 * Implements 'OUT port, eAX'.
4154 *
4155 * @param u16Port The destination port.
4156 * @param cbReg The register size.
4157 */
4158IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
4159{
4160 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4161
4162 /*
4163 * CPL check
4164 */
4165 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
4166 if (rcStrict != VINF_SUCCESS)
4167 return rcStrict;
4168
4169 /*
4170 * Perform the I/O.
4171 */
4172 uint32_t u32Value;
4173 switch (cbReg)
4174 {
4175 case 1: u32Value = pCtx->al; break;
4176 case 2: u32Value = pCtx->ax; break;
4177 case 4: u32Value = pCtx->eax; break;
4178 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4179 }
4180 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4181 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
4182 else
4183 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
4184 if (IOM_SUCCESS(rcStrict))
4185 {
4186 iemRegAddToRip(pIemCpu, cbInstr);
4187 pIemCpu->cPotentialExits++;
4188 if (rcStrict != VINF_SUCCESS)
4189 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
4190 }
4191 return rcStrict;
4192}
4193
4194
4195/**
4196 * Implements 'OUT DX, eAX'.
4197 *
4198 * @param cbReg The register size.
4199 */
4200IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
4201{
4202 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
4203}
4204
4205
4206/**
4207 * Implements 'CLI'.
4208 */
4209IEM_CIMPL_DEF_0(iemCImpl_cli)
4210{
4211 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4212 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4213 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4214 uint32_t const fEflOld = fEfl;
4215 if (pCtx->cr0 & X86_CR0_PE)
4216 {
4217 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4218 if (!(fEfl & X86_EFL_VM))
4219 {
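            /* Protected mode, not V8086: CPL <= IOPL may clear IF; CPL 3 with
               CR4.PVI set only clears the virtual interrupt flag; anything else
               faults. */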
4220 if (pIemCpu->uCpl <= uIopl)
4221 fEfl &= ~X86_EFL_IF;
4222 else if ( pIemCpu->uCpl == 3
4223 && (pCtx->cr4 & X86_CR4_PVI) )
4224 fEfl &= ~X86_EFL_VIF;
4225 else
4226 return iemRaiseGeneralProtectionFault0(pIemCpu);
4227 }
4228 /* V8086 */
4229 else if (uIopl == 3)
4230 fEfl &= ~X86_EFL_IF;
4231 else if ( uIopl < 3
4232 && (pCtx->cr4 & X86_CR4_VME) )
4233 fEfl &= ~X86_EFL_VIF;
4234 else
4235 return iemRaiseGeneralProtectionFault0(pIemCpu);
4236 }
4237 /* real mode */
4238 else
4239 fEfl &= ~X86_EFL_IF;
4240
4241 /* Commit. */
4242 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4243 iemRegAddToRip(pIemCpu, cbInstr);
4244 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
4245 return VINF_SUCCESS;
4246}
4247
4248
4249/**
4250 * Implements 'STI'.
4251 */
4252IEM_CIMPL_DEF_0(iemCImpl_sti)
4253{
4254 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4255 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4256 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4257 uint32_t const fEflOld = fEfl;
4258
4259 if (pCtx->cr0 & X86_CR0_PE)
4260 {
4261 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4262 if (!(fEfl & X86_EFL_VM))
4263 {
4264 if (pIemCpu->uCpl <= uIopl)
4265 fEfl |= X86_EFL_IF;
4266 else if ( pIemCpu->uCpl == 3
4267 && (pCtx->cr4 & X86_CR4_PVI)
4268 && !(fEfl & X86_EFL_VIP) )
4269 fEfl |= X86_EFL_VIF;
4270 else
4271 return iemRaiseGeneralProtectionFault0(pIemCpu);
4272 }
4273 /* V8086 */
4274 else if (uIopl == 3)
4275 fEfl |= X86_EFL_IF;
4276 else if ( uIopl < 3
4277 && (pCtx->cr4 & X86_CR4_VME)
4278 && !(fEfl & X86_EFL_VIP) )
4279 fEfl |= X86_EFL_VIF;
4280 else
4281 return iemRaiseGeneralProtectionFault0(pIemCpu);
4282 }
4283 /* real mode */
4284 else
4285 fEfl |= X86_EFL_IF;
4286
4287 /* Commit. */
4288 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4289 iemRegAddToRip(pIemCpu, cbInstr);
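    /* STI opens an interrupt shadow when IF goes from 0 to 1: external interrupts
       stay inhibited until after the next instruction, hence the PC-based inhibit. */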
4290 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_VERIFICATION_ENABLED(pIemCpu))
4291 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4292 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
4293 return VINF_SUCCESS;
4294}
4295
4296
4297/**
4298 * Implements 'HLT'.
4299 */
4300IEM_CIMPL_DEF_0(iemCImpl_hlt)
4301{
4302 if (pIemCpu->uCpl != 0)
4303 return iemRaiseGeneralProtectionFault0(pIemCpu);
4304 iemRegAddToRip(pIemCpu, cbInstr);
4305 return VINF_EM_HALT;
4306}
4307
4308
4309/**
4310 * Implements 'CPUID'.
4311 */
4312IEM_CIMPL_DEF_0(iemCImpl_cpuid)
4313{
4314 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4315
4316 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
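    /* CPUID only produces 32-bit results, so clear the upper halves of the 64-bit
       registers explicitly. */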
4317 pCtx->rax &= UINT32_C(0xffffffff);
4318 pCtx->rbx &= UINT32_C(0xffffffff);
4319 pCtx->rcx &= UINT32_C(0xffffffff);
4320 pCtx->rdx &= UINT32_C(0xffffffff);
4321
4322 iemRegAddToRip(pIemCpu, cbInstr);
4323 return VINF_SUCCESS;
4324}
4325
4326
4327/**
4328 * Implements 'AAD'.
4329 *
4330 * @param bImm The immediate operand.
4331 */
4332IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
4333{
4334 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4335
4336 uint16_t const ax = pCtx->ax;
4337 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
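    /* Storing the zero-extended sum into AX clears AH, which matches the AAD
       definition: AL = AL + AH * imm8, AH = 0. */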
4338 pCtx->ax = al;
4339 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4340 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4341 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4342
4343 iemRegAddToRip(pIemCpu, cbInstr);
4344 return VINF_SUCCESS;
4345}
4346
4347
4348/**
4349 * Implements 'AAM'.
4350 *
4351 * @param bImm The immediate operand. Cannot be 0.
4352 */
4353IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
4354{
4355 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4356 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
4357
4358 uint16_t const ax = pCtx->ax;
4359 uint8_t const al = (uint8_t)ax % bImm;
4360 uint8_t const ah = (uint8_t)ax / bImm;
4361 pCtx->ax = (ah << 8) + al;
4362 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4363 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4364 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4365
4366 iemRegAddToRip(pIemCpu, cbInstr);
4367 return VINF_SUCCESS;
4368}
4369
4370
4371
4372
4373/*
4374 * Instantiate the various string operation combinations.
4375 */
4376#define OP_SIZE 8
4377#define ADDR_SIZE 16
4378#include "IEMAllCImplStrInstr.cpp.h"
4379#define OP_SIZE 8
4380#define ADDR_SIZE 32
4381#include "IEMAllCImplStrInstr.cpp.h"
4382#define OP_SIZE 8
4383#define ADDR_SIZE 64
4384#include "IEMAllCImplStrInstr.cpp.h"
4385
4386#define OP_SIZE 16
4387#define ADDR_SIZE 16
4388#include "IEMAllCImplStrInstr.cpp.h"
4389#define OP_SIZE 16
4390#define ADDR_SIZE 32
4391#include "IEMAllCImplStrInstr.cpp.h"
4392#define OP_SIZE 16
4393#define ADDR_SIZE 64
4394#include "IEMAllCImplStrInstr.cpp.h"
4395
4396#define OP_SIZE 32
4397#define ADDR_SIZE 16
4398#include "IEMAllCImplStrInstr.cpp.h"
4399#define OP_SIZE 32
4400#define ADDR_SIZE 32
4401#include "IEMAllCImplStrInstr.cpp.h"
4402#define OP_SIZE 32
4403#define ADDR_SIZE 64
4404#include "IEMAllCImplStrInstr.cpp.h"
4405
4406#define OP_SIZE 64
4407#define ADDR_SIZE 32
4408#include "IEMAllCImplStrInstr.cpp.h"
4409#define OP_SIZE 64
4410#define ADDR_SIZE 64
4411#include "IEMAllCImplStrInstr.cpp.h"
4412
4413
4414/**
4415 * Implements 'FINIT' and 'FNINIT'.
4416 *
4417 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4418 * not.
4419 */
4420IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4421{
4422 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4423
4424 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4425 return iemRaiseDeviceNotAvailable(pIemCpu);
4426
4427 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4428 if (fCheckXcpts && TODO )
4429 return iemRaiseMathFault(pIemCpu);
4430 */
4431
4432 if (iemFRegIsFxSaveFormat(pIemCpu))
4433 {
4434 pCtx->fpu.FCW = 0x37f;
4435 pCtx->fpu.FSW = 0;
4436 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4437 pCtx->fpu.FPUDP = 0;
4438 pCtx->fpu.DS = 0; //??
4439 pCtx->fpu.Rsrvd2= 0;
4440 pCtx->fpu.FPUIP = 0;
4441 pCtx->fpu.CS = 0; //??
4442 pCtx->fpu.Rsrvd1= 0;
4443 pCtx->fpu.FOP = 0;
4444 }
4445 else
4446 {
4447 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4448 pFpu->FCW = 0x37f;
4449 pFpu->FSW = 0;
4450 pFpu->FTW = 0xffff; /* 11 - empty */
4451 pFpu->FPUOO = 0; //??
4452 pFpu->FPUOS = 0; //??
4453 pFpu->FPUIP = 0;
4454 pFpu->CS = 0; //??
4455 pFpu->FOP = 0;
4456 }
4457
4458 iemHlpUsedFpu(pIemCpu);
4459 iemRegAddToRip(pIemCpu, cbInstr);
4460 return VINF_SUCCESS;
4461}
4462
4463
4464/**
4465 * Implements 'FXSAVE'.
4466 *
4467 * @param iEffSeg The effective segment.
4468 * @param GCPtrEff The address of the image.
4469 * @param enmEffOpSize The operand size (only REX.W really matters).
4470 */
4471IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4472{
4473 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4474
4475 /*
4476 * Raise exceptions.
4477 */
4478 if (pCtx->cr0 & X86_CR0_EM)
4479 return iemRaiseUndefinedOpcode(pIemCpu);
4480 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4481 return iemRaiseDeviceNotAvailable(pIemCpu);
4482 if (GCPtrEff & 15)
4483 {
4484 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4485 * all/any misalignment sizes; Intel says it's an implementation detail. */
4486 if ( (pCtx->cr0 & X86_CR0_AM)
4487 && pCtx->eflags.Bits.u1AC
4488 && pIemCpu->uCpl == 3)
4489 return iemRaiseAlignmentCheckException(pIemCpu);
4490 return iemRaiseGeneralProtectionFault0(pIemCpu);
4491 }
4492 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4493
4494 /*
4495 * Access the memory.
4496 */
4497 void *pvMem512;
4498 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4499 if (rcStrict != VINF_SUCCESS)
4500 return rcStrict;
4501 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4502
4503 /*
4504 * Store the registers.
4505 */
4506 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
4507 * specific whether MXCSR and XMM0-XMM7 are saved. */
4508
4509 /* common for all formats */
4510 pDst->FCW = pCtx->fpu.FCW;
4511 pDst->FSW = pCtx->fpu.FSW;
4512 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4513 pDst->FOP = pCtx->fpu.FOP;
4514 pDst->MXCSR = pCtx->fpu.MXCSR;
4515 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4516 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4517 {
4518 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4519 * them for now... */
4520 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4521 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4522 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4523 pDst->aRegs[i].au32[3] = 0;
4524 }
4525
4526 /* FPU IP, CS, DP and DS. */
4527 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4528 * state information. :-/
4529 * Storing zeros now to prevent any potential leakage of host info. */
4530 pDst->FPUIP = 0;
4531 pDst->CS = 0;
4532 pDst->Rsrvd1 = 0;
4533 pDst->FPUDP = 0;
4534 pDst->DS = 0;
4535 pDst->Rsrvd2 = 0;
4536
4537 /* XMM registers. */
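    /* When EFER.FFXSR is set, FXSAVE executed in 64-bit mode at CPL 0 leaves the
       XMM portion of the image untouched; the check below implements that. */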
4538 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4539 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4540 || pIemCpu->uCpl != 0)
4541 {
4542 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4543 for (uint32_t i = 0; i < cXmmRegs; i++)
4544 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4545 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4546 * right? */
4547 }
4548
4549 /*
4550 * Commit the memory.
4551 */
4552 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555
4556 iemRegAddToRip(pIemCpu, cbInstr);
4557 return VINF_SUCCESS;
4558}
4559
4560
4561/**
4562 * Implements 'FXRSTOR'.
4563 *
4564 * @param iEffSeg The effective segment register for @a GCPtrEff.
 * @param GCPtrEff The address of the image.
4565 * @param enmEffOpSize The operand size (only REX.W really matters).
4566 */
4567IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4568{
4569 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4570
4571 /*
4572 * Raise exceptions.
4573 */
4574 if (pCtx->cr0 & X86_CR0_EM)
4575 return iemRaiseUndefinedOpcode(pIemCpu);
4576 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4577 return iemRaiseDeviceNotAvailable(pIemCpu);
4578 if (GCPtrEff & 15)
4579 {
4580 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4581 * all/any misalignment sizes; Intel says it's an implementation detail. */
4582 if ( (pCtx->cr0 & X86_CR0_AM)
4583 && pCtx->eflags.Bits.u1AC
4584 && pIemCpu->uCpl == 3)
4585 return iemRaiseAlignmentCheckException(pIemCpu);
4586 return iemRaiseGeneralProtectionFault0(pIemCpu);
4587 }
4588 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4589
4590 /*
4591 * Access the memory.
4592 */
4593 void *pvMem512;
4594 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4595 if (rcStrict != VINF_SUCCESS)
4596 return rcStrict;
4597 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4598
4599 /*
4600 * Check the state for stuff which will GP(0).
4601 */
4602 uint32_t const fMXCSR = pSrc->MXCSR;
4603 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4604 if (fMXCSR & ~fMXCSR_MASK)
4605 {
4606 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4607 return iemRaiseGeneralProtectionFault0(pIemCpu);
4608 }
4609
4610 /*
4611 * Load the registers.
4612 */
4613 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
4614 * specific whether MXCSR and XMM0-XMM7 are restored. */
4615
4616 /* common for all formats */
4617 pCtx->fpu.FCW = pSrc->FCW;
4618 pCtx->fpu.FSW = pSrc->FSW;
4619 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4620 pCtx->fpu.FOP = pSrc->FOP;
4621 pCtx->fpu.MXCSR = fMXCSR;
4622 /* (MXCSR_MASK is read-only) */
4623 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4624 {
4625 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4626 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4627 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4628 pCtx->fpu.aRegs[i].au32[3] = 0;
4629 }
4630
4631 /* FPU IP, CS, DP and DS. */
4632 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4633 {
4634 pCtx->fpu.FPUIP = pSrc->FPUIP;
4635 pCtx->fpu.CS = pSrc->CS;
4636 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4637 pCtx->fpu.FPUDP = pSrc->FPUDP;
4638 pCtx->fpu.DS = pSrc->DS;
4639 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4640 }
4641 else
4642 {
4643 pCtx->fpu.FPUIP = pSrc->FPUIP;
4644 pCtx->fpu.CS = pSrc->CS;
4645 pCtx->fpu.Rsrvd1 = 0;
4646 pCtx->fpu.FPUDP = pSrc->FPUDP;
4647 pCtx->fpu.DS = pSrc->DS;
4648 pCtx->fpu.Rsrvd2 = 0;
4649 }
4650
4651 /* XMM registers. */
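    /* Same EFER.FFXSR rule as for FXSAVE: in 64-bit mode at CPL 0 the XMM registers
       are not restored from the image. */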
4652 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4653 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4654 || pIemCpu->uCpl != 0)
4655 {
4656 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4657 for (uint32_t i = 0; i < cXmmRegs; i++)
4658 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4659 }
4660
4661 /*
4662 * Commit the memory.
4663 */
4664 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4665 if (rcStrict != VINF_SUCCESS)
4666 return rcStrict;
4667
4668 iemHlpUsedFpu(pIemCpu);
4669 iemRegAddToRip(pIemCpu, cbInstr);
4670 return VINF_SUCCESS;
4671}
4672
4673
4674/**
4675 * Common routine for fnstenv and fnsave.
4676 *
4677 * @param uPtr Where to store the state.
4678 * @param pCtx The CPU context.
4679 */
4680static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4681{
4682 if (enmEffOpSize == IEMMODE_16BIT)
4683 {
4684 uPtr.pu16[0] = pCtx->fpu.FCW;
4685 uPtr.pu16[1] = pCtx->fpu.FSW;
4686 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4687 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4688 {
4689 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4690 * protected mode or long mode and we save it in real mode? And vice
4691 * versa? And with 32-bit operand size? I think the CPU is storing the
4692 * effective address ((CS << 4) + IP) in the offset register and not
4693 * doing any address calculations here. */
4694 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4695 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4696 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4697 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4698 }
4699 else
4700 {
4701 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4702 uPtr.pu16[4] = pCtx->fpu.CS;
4703 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4704 uPtr.pu16[6] = pCtx->fpu.DS;
4705 }
4706 }
4707 else
4708 {
4709 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4710 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4711 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4712 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4713 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4714 {
4715 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4716 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4717 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4718 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4719 }
4720 else
4721 {
4722 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4723 uPtr.pu16[4*2] = pCtx->fpu.CS;
4724 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4725 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4726 uPtr.pu16[6*2] = pCtx->fpu.DS;
4727 }
4728 }
4729}
4730
4731
4732/**
4733 * Common routine for fldenv and frstor.
4734 *
4735 * @param uPtr Where to load the state from.
4736 * @param pCtx The CPU context.
4737 */
4738static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4739{
4740 if (enmEffOpSize == IEMMODE_16BIT)
4741 {
4742 pCtx->fpu.FCW = uPtr.pu16[0];
4743 pCtx->fpu.FSW = uPtr.pu16[1];
4744 pCtx->fpu.FTW = uPtr.pu16[2];
4745 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4746 {
4747 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4748 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4749 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4750 pCtx->fpu.CS = 0;
4751 pCtx->fpu.Rsrvd1= 0;
4752 pCtx->fpu.DS = 0;
4753 pCtx->fpu.Rsrvd2= 0;
4754 }
4755 else
4756 {
4757 pCtx->fpu.FPUIP = uPtr.pu16[3];
4758 pCtx->fpu.CS = uPtr.pu16[4];
4759 pCtx->fpu.Rsrvd1= 0;
4760 pCtx->fpu.FPUDP = uPtr.pu16[5];
4761 pCtx->fpu.DS = uPtr.pu16[6];
4762 pCtx->fpu.Rsrvd2= 0;
4763 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4764 }
4765 }
4766 else
4767 {
4768 pCtx->fpu.FCW = uPtr.pu16[0*2];
4769 pCtx->fpu.FSW = uPtr.pu16[1*2];
4770 pCtx->fpu.FTW = uPtr.pu16[2*2];
4771 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4772 {
4773 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4774 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4775 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4776 pCtx->fpu.CS = 0;
4777 pCtx->fpu.Rsrvd1= 0;
4778 pCtx->fpu.DS = 0;
4779 pCtx->fpu.Rsrvd2= 0;
4780 }
4781 else
4782 {
4783 pCtx->fpu.FPUIP = uPtr.pu32[3];
4784 pCtx->fpu.CS = uPtr.pu16[4*2];
4785 pCtx->fpu.Rsrvd1= 0;
4786 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4787 pCtx->fpu.FPUDP = uPtr.pu32[5];
4788 pCtx->fpu.DS = uPtr.pu16[6*2];
4789 pCtx->fpu.Rsrvd2= 0;
4790 }
4791 }
4792
4793 /* Make adjustments. */
4794 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4795 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4796 iemFpuRecalcExceptionStatus(pCtx);
4797 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4798 * exceptions are pending after loading the saved state? */
4799}
4800
4801
4802/**
4803 * Implements 'FNSTENV'.
4804 *
4805 * @param enmEffOpSize The operand size (only REX.W really matters).
4806 * @param iEffSeg The effective segment register for @a GCPtrEff.
4807 * @param GCPtrEffDst The address of the image.
4808 */
4809IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4810{
4811 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4812 RTPTRUNION uPtr;
4813 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4814 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4815 if (rcStrict != VINF_SUCCESS)
4816 return rcStrict;
4817
4818 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4819
4820 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4821 if (rcStrict != VINF_SUCCESS)
4822 return rcStrict;
4823
4824 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4825 iemRegAddToRip(pIemCpu, cbInstr);
4826 return VINF_SUCCESS;
4827}
4828
4829
4830/**
4831 * Implements 'FNSAVE'.
4832 *
4833 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
 * @param GCPtrEffDst The address of the image.
4834 * @param enmEffOpSize The operand size.
4835 */
4836IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4837{
4838 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4839 RTPTRUNION uPtr;
4840 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4841 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4842 if (rcStrict != VINF_SUCCESS)
4843 return rcStrict;
4844
4845 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4846 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4847 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4848 {
4849 paRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4850 paRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4851 paRegs[i].au16[4] = pCtx->fpu.aRegs[i].au16[4];
4852 }
4853
4854 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4855 if (rcStrict != VINF_SUCCESS)
4856 return rcStrict;
4857
4858 /*
4859 * Re-initialize the FPU.
4860 */
4861 pCtx->fpu.FCW = 0x37f;
4862 pCtx->fpu.FSW = 0;
4863 pCtx->fpu.FTW = 0x00; /* 0 - empty */
4864 pCtx->fpu.FPUDP = 0;
4865 pCtx->fpu.DS = 0;
4866 pCtx->fpu.Rsrvd2= 0;
4867 pCtx->fpu.FPUIP = 0;
4868 pCtx->fpu.CS = 0;
4869 pCtx->fpu.Rsrvd1= 0;
4870 pCtx->fpu.FOP = 0;
4871
4872 iemHlpUsedFpu(pIemCpu);
4873 iemRegAddToRip(pIemCpu, cbInstr);
4874 return VINF_SUCCESS;
4875}
4876
4877
4878
4879/**
4880 * Implements 'FLDENV'.
4881 *
4882 * @param enmEffOpSize The operand size (only REX.W really matters).
4883 * @param iEffSeg The effective segment register for @a GCPtrEff.
4884 * @param GCPtrEffSrc The address of the image.
4885 */
4886IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4887{
4888 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4889 RTCPTRUNION uPtr;
4890 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4891 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4892 if (rcStrict != VINF_SUCCESS)
4893 return rcStrict;
4894
4895 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4896
4897 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4898 if (rcStrict != VINF_SUCCESS)
4899 return rcStrict;
4900
4901 iemHlpUsedFpu(pIemCpu);
4902 iemRegAddToRip(pIemCpu, cbInstr);
4903 return VINF_SUCCESS;
4904}
4905
4906
4907/**
4908 * Implements 'FRSTOR'.
4909 *
4910 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
 * @param GCPtrEffSrc The address of the image.
4911 * @param enmEffOpSize The operand size.
4912 */
4913IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4914{
4915 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4916 RTCPTRUNION uPtr;
4917 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4918 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4919 if (rcStrict != VINF_SUCCESS)
4920 return rcStrict;
4921
4922 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4923 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4924 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4925 {
4926 pCtx->fpu.aRegs[i].au32[0] = paRegs[i].au32[0];
4927 pCtx->fpu.aRegs[i].au32[1] = paRegs[i].au32[1];
4928 pCtx->fpu.aRegs[i].au32[2] = paRegs[i].au16[4];
4929 pCtx->fpu.aRegs[i].au32[3] = 0;
4930 }
4931
4932 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4933 if (rcStrict != VINF_SUCCESS)
4934 return rcStrict;
4935
4936 iemHlpUsedFpu(pIemCpu);
4937 iemRegAddToRip(pIemCpu, cbInstr);
4938 return VINF_SUCCESS;
4939}
4940
4941
4942/**
4943 * Implements 'FLDCW'.
4944 *
4945 * @param u16Fcw The new FCW.
4946 */
4947IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4948{
4949 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4950
4951 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4952 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4953 * (other than 6 and 7). Currently ignoring them. */
4954 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4955 * according to FSW. (This is what is currently implemented.) */
4956 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4957 iemFpuRecalcExceptionStatus(pCtx);
4958
4959 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4960 iemHlpUsedFpu(pIemCpu);
4961 iemRegAddToRip(pIemCpu, cbInstr);
4962 return VINF_SUCCESS;
4963}
4964
4965
4966
4967/**
4968 * Implements the underflow case of fxch.
4969 *
4970 * @param iStReg The other stack register.
4971 */
4972IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4973{
4974 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4975
4976 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4977 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4978 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4979
4980 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4981 * registers are read as QNaN and then exchanged. This could be
4982 * wrong... */
4983 if (pCtx->fpu.FCW & X86_FCW_IM)
4984 {
4985 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4986 {
4987 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4988 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4989 else
4990 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4991 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4992 }
4993 else
4994 {
4995 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4996 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4997 }
4998 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4999 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5000 }
5001 else
5002 {
5003 /* raise underflow exception, don't change anything. */
5004 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
5005 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5006 }
5007
5008 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5009 iemHlpUsedFpu(pIemCpu);
5010 iemRegAddToRip(pIemCpu, cbInstr);
5011 return VINF_SUCCESS;
5012}
5013
5014
5015/**
5016 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
5017 *
5018 * @param iStReg The other stack register.
 * @param pfnAImpl The assembly comparison worker to call.
 * @param fPop Whether to pop ST(0) after the comparison.
5019 */
5020IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
5021{
5022 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5023 Assert(iStReg < 8);
5024
5025 /*
5026 * Raise exceptions.
5027 */
5028 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
5029 return iemRaiseDeviceNotAvailable(pIemCpu);
5030 uint16_t u16Fsw = pCtx->fpu.FSW;
5031 if (u16Fsw & X86_FSW_ES)
5032 return iemRaiseMathFault(pIemCpu);
5033
5034 /*
5035 * Check if any of the register accesses causes #SF + #IA.
5036 */
5037 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
5038 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
5039 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
5040 {
5041 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
5042 pCtx->fpu.FSW &= ~X86_FSW_C1;
5043 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
5044 if ( !(u16Fsw & X86_FSW_IE)
5045 || (pCtx->fpu.FCW & X86_FCW_IM) )
5046 {
5047 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5048 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5049 }
5050 }
5051 else if (pCtx->fpu.FCW & X86_FCW_IM)
5052 {
5053 /* Masked underflow. */
5054 pCtx->fpu.FSW &= ~X86_FSW_C1;
5055 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5056 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5057 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
5058 }
5059 else
5060 {
5061 /* Raise underflow - don't touch EFLAGS or TOP. */
5062 pCtx->fpu.FSW &= ~X86_FSW_C1;
5063 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5064 fPop = false;
5065 }
5066
5067 /*
5068 * Pop if necessary.
5069 */
5070 if (fPop)
5071 {
5072 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
5073 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
5074 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
5075 }
5076
5077 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5078 iemHlpUsedFpu(pIemCpu);
5079 iemRegAddToRip(pIemCpu, cbInstr);
5080 return VINF_SUCCESS;
5081}
5082
5083/** @} */
5084