VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h @ 47291

Last change on this file since 47291 was 47291, checked in by vboxsync, 11 years ago

IEM: More 64-bit fixes.

1/* $Id: IEMAllCImpl.cpp.h 47291 2013-07-22 01:17:28Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 X86EFLAGS Efl;
38 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
39 if ( (pCtx->cr0 & X86_CR0_PE)
40 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
41 || Efl.Bits.u1VM) )
42 {
43 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
44 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
45 }
46 return VINF_SUCCESS;
47}
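

/*
 * A rough, untested sketch of what the I/O permission bitmap check noted in
 * the @todo above involves architecturally: locate the I/O map base in the
 * TSS (offset 0x66 of the 32-bit TSS) and verify that every bit covering the
 * accessed ports is clear.  The iemMemFetchSysU16 helper and its signature
 * are assumptions made for this sketch, and TSS limit checking is omitted.
 */
#if 0
static VBOXSTRICTRC iemHlpCheckIoBitmapSketch(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
    /* Fetch the 16-bit I/O map base from the TSS. */
    uint16_t offIoBitmap;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offIoBitmap, UINT8_MAX, pCtx->tr.u64Base + 0x66);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Read the two bitmap bytes covering the port range and test the bits. */
    uint16_t bmBytes;
    rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offIoBitmap + (u16Port >> 3));
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const fPortMask = (uint16_t)(((1U << cbOperand) - 1U) << (u16Port & 7));
    if (bmBytes & fPortMask)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    return VINF_SUCCESS;
}
#endif /* illustrative sketch only */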
48
49
50#if 0
51/**
52 * Calculates the parity bit.
53 *
54 * @returns true if the bit is set, false if not.
55 * @param u8Result The least significant byte of the result.
56 */
57static bool iemHlpCalcParityFlag(uint8_t u8Result)
58{
59 /*
60 * Parity is set if the number of bits in the least significant byte of
61 * the result is even.
62 */
63 uint8_t cBits;
64 cBits = u8Result & 1; /* 0 */
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1;
71 u8Result >>= 1;
72 cBits += u8Result & 1; /* 4 */
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 u8Result >>= 1;
78 cBits += u8Result & 1;
79 return !(cBits & 1);
80}
81#endif /* not used */
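

/*
 * Equivalent XOR-folding form of the unused helper above, kept only as an
 * illustration of the same definition: PF is set when the least significant
 * byte of the result contains an even number of set bits.
 */
#if 0
static bool iemHlpCalcParityFlagXorFold(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;  /* fold the high nibble into the low one */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;  /* bit 0 now holds the XOR of all eight bits */
    return !(u8Result & 1);     /* even number of set bits -> PF set */
}
#endif /* not used either */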
82
83
84/**
85 * Updates the specified flags according to an 8-bit result.
86 *
87 * @param pIemCpu The IEM state of the calling EMT.
88 * @param u8Result The result to set the flags according to.
89 * @param fToUpdate The flags to update.
90 * @param fUndefined The flags that are specified as undefined.
91 */
92static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
93{
94 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
95
96 uint32_t fEFlags = pCtx->eflags.u;
97 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
98 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
99 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
100}
101
102
103/**
104 * Loads a NULL data selector into a selector register, both the hidden and
105 * visible parts, in protected mode.
106 *
107 * @param pSReg Pointer to the segment register.
108 * @param uRpl The RPL.
109 */
110static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
111{
112 /** @todo Testcase: write a testcase checking what happens when loading a NULL
113 * data selector in protected mode. */
114 pSReg->Sel = uRpl;
115 pSReg->ValidSel = uRpl;
116 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
117 pSReg->u64Base = 0;
118 pSReg->u32Limit = 0;
119 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
120}
121
122
123/**
124 * Helper used by iret.
125 * @param pIemCpu The IEM per CPU data.
126 * @param uCpl The new CPL.
127 * @param pSReg Pointer to the segment register.
128 */
129static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
130{
131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
132 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
133 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
134#else
135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
136#endif
137
138 if ( uCpl > pSReg->Attr.n.u2Dpl
139 && pSReg->Attr.n.u1DescType /* code or data, not system */
140 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
141 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
142 iemHlpLoadNullDataSelectorProt(pSReg, 0);
143}
144
145
146/**
147 * Indicates that we have modified the FPU state.
148 *
149 * @param pIemCpu The IEM state of the calling EMT.
150 */
151DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
152{
153 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
154}
155
156/** @} */
157
158/** @name C Implementations
159 * @{
160 */
161
162/**
163 * Implements a 16-bit popa.
164 */
165IEM_CIMPL_DEF_0(iemCImpl_popa_16)
166{
167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
168 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
169 RTGCPTR GCPtrLast = GCPtrStart + 15;
170 VBOXSTRICTRC rcStrict;
171
172 /*
173 * The docs are a bit hard to comprehend here, but it looks like we wrap
174 * around in real mode as long as none of the individual "popa" crosses the
175 * end of the stack segment. In protected mode we check the whole access
176 * in one go. For efficiency, only do the word-by-word thing if we're in
177 * danger of wrapping around.
178 */
179 /** @todo do popa boundary / wrap-around checks. */
180 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
181 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
182 {
183 /* word-by-word */
184 RTUINT64U TmpRsp;
185 TmpRsp.u = pCtx->rsp;
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
194 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
195 }
196 if (rcStrict == VINF_SUCCESS)
197 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
198 if (rcStrict == VINF_SUCCESS)
199 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
200 if (rcStrict == VINF_SUCCESS)
201 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 pCtx->rsp = TmpRsp.u;
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 else
209 {
210 uint16_t const *pa16Mem = NULL;
211 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
212 if (rcStrict == VINF_SUCCESS)
213 {
214 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
215 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
216 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
217 /* skip sp */
218 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
219 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
220 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
221 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
222 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
223 if (rcStrict == VINF_SUCCESS)
224 {
225 iemRegAddToRsp(pCtx, 16);
226 iemRegAddToRip(pIemCpu, cbInstr);
227 }
228 }
229 }
230 return rcStrict;
231}
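

/*
 * Note on the indexing above: PUSHA stores AX first and DI last, so DI ends
 * up at the lowest address of the 16-byte frame mapped here, which is what
 * the 7 - X86_GREG_xXX expressions rely on.  Assuming the usual register
 * index values (X86_GREG_xAX = 0 ... X86_GREG_xDI = 7), this can be stated
 * at compile time:
 */
#if 0
AssertCompile(7 - X86_GREG_xDI == 0);   /* DI at the lowest address of the frame */
AssertCompile(7 - X86_GREG_xSP == 3);   /* the skipped SP slot */
AssertCompile(7 - X86_GREG_xAX == 7);   /* AX at the highest address */
#endif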
232
233
234/**
235 * Implements a 32-bit popa.
236 */
237IEM_CIMPL_DEF_0(iemCImpl_popa_32)
238{
239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
240 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
241 RTGCPTR GCPtrLast = GCPtrStart + 31;
242 VBOXSTRICTRC rcStrict;
243
244 /*
245 * The docs are a bit hard to comprehend here, but it looks like we wrap
246 * around in real mode as long as none of the individual "popa" crosses the
247 * end of the stack segment. In protected mode we check the whole access
248 * in one go. For efficiency, only do the word-by-word thing if we're in
249 * danger of wrapping around.
250 */
251 /** @todo do popa boundary / wrap-around checks. */
252 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
253 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
254 {
255 /* dword-by-dword */
256 RTUINT64U TmpRsp;
257 TmpRsp.u = pCtx->rsp;
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
263 if (rcStrict == VINF_SUCCESS)
264 {
265 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
266 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
267 }
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
272 if (rcStrict == VINF_SUCCESS)
273 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
274 if (rcStrict == VINF_SUCCESS)
275 {
276#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
277 pCtx->rdi &= UINT32_MAX;
278 pCtx->rsi &= UINT32_MAX;
279 pCtx->rbp &= UINT32_MAX;
280 pCtx->rbx &= UINT32_MAX;
281 pCtx->rdx &= UINT32_MAX;
282 pCtx->rcx &= UINT32_MAX;
283 pCtx->rax &= UINT32_MAX;
284#endif
285 pCtx->rsp = TmpRsp.u;
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 else
290 {
291 uint32_t const *pa32Mem;
292 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
293 if (rcStrict == VINF_SUCCESS)
294 {
295 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
296 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
297 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
298 /* skip esp */
299 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
300 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
301 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
302 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
303 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
304 if (rcStrict == VINF_SUCCESS)
305 {
306 iemRegAddToRsp(pCtx, 32);
307 iemRegAddToRip(pIemCpu, cbInstr);
308 }
309 }
310 }
311 return rcStrict;
312}
313
314
315/**
316 * Implements a 16-bit pusha.
317 */
318IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
319{
320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
321 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
322 RTGCPTR GCPtrBottom = GCPtrTop - 15;
323 VBOXSTRICTRC rcStrict;
324
325 /*
326 * The docs are a bit hard to comprehend here, but it looks like we wrap
327 * around in real mode as long as none of the individual "pusha" crosses the
328 * end of the stack segment. In protected mode we check the whole access
329 * in one go. For efficiency, only do the word-by-word thing if we're in
330 * danger of wrapping around.
331 */
332 /** @todo do pusha boundary / wrap-around checks. */
333 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
334 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
335 {
336 /* word-by-word */
337 RTUINT64U TmpRsp;
338 TmpRsp.u = pCtx->rsp;
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
344 if (rcStrict == VINF_SUCCESS)
345 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
346 if (rcStrict == VINF_SUCCESS)
347 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 {
356 pCtx->rsp = TmpRsp.u;
357 iemRegAddToRip(pIemCpu, cbInstr);
358 }
359 }
360 else
361 {
362 GCPtrBottom--;
363 uint16_t *pa16Mem = NULL;
364 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
365 if (rcStrict == VINF_SUCCESS)
366 {
367 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
368 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
369 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
370 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
371 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
372 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
373 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
374 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
375 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
376 if (rcStrict == VINF_SUCCESS)
377 {
378 iemRegSubFromRsp(pCtx, 16);
379 iemRegAddToRip(pIemCpu, cbInstr);
380 }
381 }
382 }
383 return rcStrict;
384}
385
386
387/**
388 * Implements a 32-bit pusha.
389 */
390IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
391{
392 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
393 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
394 RTGCPTR GCPtrBottom = GCPtrTop - 31;
395 VBOXSTRICTRC rcStrict;
396
397 /*
398 * The docs are a bit hard to comprehend here, but it looks like we wrap
399 * around in real mode as long as none of the individual "pusha" crosses the
400 * end of the stack segment. In protected mode we check the whole access
401 * in one go. For efficiency, only do the word-by-word thing if we're in
402 * danger of wrapping around.
403 */
404 /** @todo do pusha boundary / wrap-around checks. */
405 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
406 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
407 {
408 /* dword-by-dword */
409 RTUINT64U TmpRsp;
410 TmpRsp.u = pCtx->rsp;
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 {
428 pCtx->rsp = TmpRsp.u;
429 iemRegAddToRip(pIemCpu, cbInstr);
430 }
431 }
432 else
433 {
434 GCPtrBottom--;
435 uint32_t *pa32Mem;
436 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
437 if (rcStrict == VINF_SUCCESS)
438 {
439 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
440 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
441 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
442 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
443 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
444 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
445 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
446 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
447 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 iemRegSubFromRsp(pCtx, 32);
451 iemRegAddToRip(pIemCpu, cbInstr);
452 }
453 }
454 }
455 return rcStrict;
456}
457
458
459/**
460 * Implements pushf.
461 *
463 * @param enmEffOpSize The effective operand size.
464 */
465IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
466{
467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
468
469 /*
470 * If we're in V8086 mode some care is required (which is why we're
471 * doing this in a C implementation).
472 */
473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
474 if ( (fEfl & X86_EFL_VM)
475 && X86_EFL_GET_IOPL(fEfl) != 3 )
476 {
477 Assert(pCtx->cr0 & X86_CR0_PE);
478 if ( enmEffOpSize != IEMMODE_16BIT
479 || !(pCtx->cr4 & X86_CR4_VME))
480 return iemRaiseGeneralProtectionFault0(pIemCpu);
481 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
482 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
483 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
484 }
485
486 /*
487 * Ok, clear RF and VM and push the flags.
488 */
489 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
490
491 VBOXSTRICTRC rcStrict;
492 switch (enmEffOpSize)
493 {
494 case IEMMODE_16BIT:
495 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
496 break;
497 case IEMMODE_32BIT:
498 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
499 break;
500 case IEMMODE_64BIT:
501 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
502 break;
503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
504 }
505 if (rcStrict != VINF_SUCCESS)
506 return rcStrict;
507
508 iemRegAddToRip(pIemCpu, cbInstr);
509 return VINF_SUCCESS;
510}
511
512
513/**
514 * Implements popf.
515 *
516 * @param enmEffOpSize The effective operand size.
517 */
518IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
519{
520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
521 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
522 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
523 VBOXSTRICTRC rcStrict;
524 uint32_t fEflNew;
525
526 /*
527 * V8086 is special as usual.
528 */
529 if (fEflOld & X86_EFL_VM)
530 {
531 /*
532 * Almost anything goes if IOPL is 3.
533 */
534 if (X86_EFL_GET_IOPL(fEflOld) == 3)
535 {
536 switch (enmEffOpSize)
537 {
538 case IEMMODE_16BIT:
539 {
540 uint16_t u16Value;
541 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
542 if (rcStrict != VINF_SUCCESS)
543 return rcStrict;
544 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
545 break;
546 }
547 case IEMMODE_32BIT:
548 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
549 if (rcStrict != VINF_SUCCESS)
550 return rcStrict;
551 break;
552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
553 }
554
555 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
556 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
557 }
558 /*
559 * Interrupt flag virtualization with CR4.VME=1.
560 */
561 else if ( enmEffOpSize == IEMMODE_16BIT
562 && (pCtx->cr4 & X86_CR4_VME) )
563 {
564 uint16_t u16Value;
565 RTUINT64U TmpRsp;
566 TmpRsp.u = pCtx->rsp;
567 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
568 if (rcStrict != VINF_SUCCESS)
569 return rcStrict;
570
571 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
572 * or before? */
573 if ( ( (u16Value & X86_EFL_IF)
574 && (fEflOld & X86_EFL_VIP))
575 || (u16Value & X86_EFL_TF) )
576 return iemRaiseGeneralProtectionFault0(pIemCpu);
577
578 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
579 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
580 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
581 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
582
583 pCtx->rsp = TmpRsp.u;
584 }
585 else
586 return iemRaiseGeneralProtectionFault0(pIemCpu);
587
588 }
589 /*
590 * Not in V8086 mode.
591 */
592 else
593 {
594 /* Pop the flags. */
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 {
599 uint16_t u16Value;
600 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
601 if (rcStrict != VINF_SUCCESS)
602 return rcStrict;
603 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
604 break;
605 }
606 case IEMMODE_32BIT:
607 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
608 if (rcStrict != VINF_SUCCESS)
609 return rcStrict;
610 break;
611 case IEMMODE_64BIT:
612 {
613 uint64_t u64Value;
614 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
615 if (rcStrict != VINF_SUCCESS)
616 return rcStrict;
617 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
618 break;
619 }
620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
621 }
622
623 /* Merge them with the current flags. */
624 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
625 || pIemCpu->uCpl == 0)
626 {
627 fEflNew &= X86_EFL_POPF_BITS;
628 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
629 }
630 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
631 {
632 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
633 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
634 }
635 else
636 {
637 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
638 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
639 }
640 }
641
642 /*
643 * Commit the flags.
644 */
645 Assert(fEflNew & RT_BIT_32(1));
646 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
647 iemRegAddToRip(pIemCpu, cbInstr);
648
649 return VINF_SUCCESS;
650}
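

/*
 * The three merge branches above all follow the same pattern: keep the old
 * value of every EFLAGS bit the current privilege level may not change and
 * take the popped value for the rest.  A sketch of that pattern as a helper
 * (illustration only, not used by the code above):
 */
#if 0
DECLINLINE(uint32_t) iemHlpMergePopfEflSketch(uint32_t fEflOld, uint32_t fEflPopped, uint32_t fWritable)
{
    return (fEflPopped & fWritable) | (fEflOld & ~fWritable);
}
/* CPL 0 (or IOPL+IF unchanged):  fWritable = X86_EFL_POPF_BITS
 * CPL <= IOPL:                   fWritable = X86_EFL_POPF_BITS & ~X86_EFL_IOPL
 * otherwise:                     fWritable = X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF) */
#endif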
651
652
653/**
654 * Implements a 16-bit indirect call.
655 *
656 * @param uNewPC The new program counter (RIP) value (loaded from the
657 * operand).
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673
674}
675
676
677/**
678 * Implements a 16-bit relative call.
679 *
680 * @param offDisp The displacement offset.
681 */
682IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
683{
684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
685 uint16_t uOldPC = pCtx->ip + cbInstr;
686 uint16_t uNewPC = uOldPC + offDisp;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696}
697
698
699/**
700 * Implements a 32-bit indirect call.
701 *
702 * @param uNewPC The new program counter (RIP) value (loaded from the
703 * operand).
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719
720}
721
722
723/**
724 * Implements a 32-bit relative call.
725 *
726 * @param offDisp The displacement offset.
727 */
728IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 uint32_t uOldPC = pCtx->eip + cbInstr;
732 uint32_t uNewPC = uOldPC + offDisp;
733 if (uNewPC > pCtx->cs.u32Limit)
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Implements a 64-bit indirect call.
747 *
748 * @param uNewPC The new program counter (RIP) value (loaded from the
749 * operand).
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseGeneralProtectionFault0(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765
766}
767
768
769/**
770 * Implements a 64-bit relative call.
771 *
772 * @param offDisp The displacement offset.
773 */
774IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
775{
776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
777 uint64_t uOldPC = pCtx->rip + cbInstr;
778 uint64_t uNewPC = uOldPC + offDisp;
779 if (!IEM_IS_CANONICAL(uNewPC))
780 return iemRaiseNotCanonical(pIemCpu);
781
782 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
783 if (rcStrict != VINF_SUCCESS)
784 return rcStrict;
785
786 pCtx->rip = uNewPC;
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Implements far jumps and calls thru task segments (TSS).
793 *
794 * @param uSel The selector.
795 * @param enmBranch The kind of branching we're performing.
796 * @param enmEffOpSize The effective operand size.
797 * @param pDesc The descriptor corresponding to @a uSel. The type is
798 * task segment.
799 */
800IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
801{
802 /* Call various functions to do the work. */
803 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
804}
805
806
807/**
808 * Implements far jumps and calls thru task gates.
809 *
810 * @param uSel The selector.
811 * @param enmBranch The kind of branching we're performing.
812 * @param enmEffOpSize The effective operand size.
813 * @param pDesc The descriptor corresponding to @a uSel. The type is
814 * task gate.
815 */
816IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
817{
818 /* Call various functions to do the work. */
819 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
820}
821
822
823/**
824 * Implements far jumps and calls thru call gates.
825 *
826 * @param uSel The selector.
827 * @param enmBranch The kind of branching we're performing.
828 * @param enmEffOpSize The effective operand size.
829 * @param pDesc The descriptor corresponding to @a uSel. The type is
830 * call gate.
831 */
832IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
833{
834 /* Call various functions to do the work. */
835 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
836}
837
838
839/**
840 * Implements far jumps and calls thru system selectors.
841 *
842 * @param uSel The selector.
843 * @param enmBranch The kind of branching we're performing.
844 * @param enmEffOpSize The effective operand size.
845 * @param pDesc The descriptor corresponding to @a uSel.
846 */
847IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
848{
849 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
850 Assert((uSel & X86_SEL_MASK_OFF_RPL));
851
852 if (IEM_IS_LONG_MODE(pIemCpu))
853 switch (pDesc->Legacy.Gen.u4Type)
854 {
855 case AMD64_SEL_TYPE_SYS_CALL_GATE:
856 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
857
858 default:
859 case AMD64_SEL_TYPE_SYS_LDT:
860 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
861 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
862 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
863 case AMD64_SEL_TYPE_SYS_INT_GATE:
864 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 }
868
869 switch (pDesc->Legacy.Gen.u4Type)
870 {
871 case X86_SEL_TYPE_SYS_286_CALL_GATE:
872 case X86_SEL_TYPE_SYS_386_CALL_GATE:
873 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
874
875 case X86_SEL_TYPE_SYS_TASK_GATE:
876 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
877
878 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
879 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
880 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
881
882 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
883 Log(("branch %04x -> busy 286 TSS\n", uSel));
884 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
885
886 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
887 Log(("branch %04x -> busy 386 TSS\n", uSel));
888 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
889
890 default:
891 case X86_SEL_TYPE_SYS_LDT:
892 case X86_SEL_TYPE_SYS_286_INT_GATE:
893 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
894 case X86_SEL_TYPE_SYS_386_INT_GATE:
895 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
896 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
897 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
898 }
899}
900
901
902/**
903 * Implements far jumps.
904 *
905 * @param uSel The selector.
906 * @param offSeg The segment offset.
907 * @param enmEffOpSize The effective operand size.
908 */
909IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
910{
911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
912 NOREF(cbInstr);
913 Assert(offSeg <= UINT32_MAX);
914
915 /*
916 * Real mode and V8086 mode are easy. The only snag seems to be that
917 * CS.limit doesn't change and the limit check is done against the current
918 * limit.
919 */
920 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
921 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
922 {
923 if (offSeg > pCtx->cs.u32Limit)
924 return iemRaiseGeneralProtectionFault0(pIemCpu);
925
926 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
927 pCtx->rip = offSeg;
928 else
929 pCtx->rip = offSeg & UINT16_MAX;
930 pCtx->cs.Sel = uSel;
931 pCtx->cs.ValidSel = uSel;
932 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
933 pCtx->cs.u64Base = (uint32_t)uSel << 4;
934 return VINF_SUCCESS;
935 }
936
937 /*
938 * Protected mode. Need to parse the specified descriptor...
939 */
940 if (!(uSel & X86_SEL_MASK_OFF_RPL))
941 {
942 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 }
945
946 /* Fetch the descriptor. */
947 IEMSELDESC Desc;
948 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
949 if (rcStrict != VINF_SUCCESS)
950 return rcStrict;
951
952 /* Is it there? */
953 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
954 {
955 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
956 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
957 }
958
959 /*
960 * Deal with it according to its type. We do the standard code selectors
961 * here and dispatch the system selectors to worker functions.
962 */
963 if (!Desc.Legacy.Gen.u1DescType)
964 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
965
966 /* Only code segments. */
967 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
968 {
969 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972
973 /* L vs D. */
974 if ( Desc.Legacy.Gen.u1Long
975 && Desc.Legacy.Gen.u1DefBig
976 && IEM_IS_LONG_MODE(pIemCpu))
977 {
978 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981
982 /* DPL/RPL/CPL check, where conforming segments make a difference. */
983 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
984 {
985 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
986 {
987 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
988 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
989 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
990 }
991 }
992 else
993 {
994 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
995 {
996 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1000 {
1001 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1002 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1003 }
1004 }
1005
1006 /* Chop the high bits if 16-bit (Intel says so). */
1007 if (enmEffOpSize == IEMMODE_16BIT)
1008 offSeg &= UINT16_MAX;
1009
1010 /* Limit check. (Should alternatively check for non-canonical addresses
1011 here, but that is ruled out by offSeg being 32-bit, right?) */
1012 uint64_t u64Base;
1013 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1015 u64Base = 0;
1016 else
1017 {
1018 if (offSeg > cbLimit)
1019 {
1020 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1022 }
1023 u64Base = X86DESC_BASE(&Desc.Legacy);
1024 }
1025
1026 /*
1027 * Ok, everything checked out fine. Now set the accessed bit before
1028 * committing the result into CS, CSHID and RIP.
1029 */
1030 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1031 {
1032 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1033 if (rcStrict != VINF_SUCCESS)
1034 return rcStrict;
1035 /** @todo check what VT-x and AMD-V does. */
1036 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1037 }
1038
1039 /* commit */
1040 pCtx->rip = offSeg;
1041 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1042 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1043 pCtx->cs.ValidSel = pCtx->cs.Sel;
1044 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1045 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1046 pCtx->cs.u32Limit = cbLimit;
1047 pCtx->cs.u64Base = u64Base;
1048 /** @todo check if the hidden bits are loaded correctly for 64-bit
1049 * mode. */
1050 return VINF_SUCCESS;
1051}
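

/*
 * Real mode reminder for the first branch above: the hidden CS base is just
 * the selector shifted left by four, so the linear target of jmpf uSel:offSeg
 * is uSel * 16 + offSeg.  A sketch of that relationship:
 */
#if 0
DECLINLINE(uint32_t) iemHlpRealModeJmpfLinearSketch(uint16_t uSel, uint16_t offSeg)
{
    return ((uint32_t)uSel << 4) + offSeg;  /* e.g. jmpf f000:fff0 -> 000ffff0 */
}
#endif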
1052
1053
1054/**
1055 * Implements far calls.
1056 *
1057 * This is very similar to iemCImpl_FarJmp.
1058 *
1059 * @param uSel The selector.
1060 * @param offSeg The segment offset.
1061 * @param enmEffOpSize The operand size (in case we need it).
1062 */
1063IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1064{
1065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1066 VBOXSTRICTRC rcStrict;
1067 uint64_t uNewRsp;
1068 RTPTRUNION uPtrRet;
1069
1070 /*
1071 * Real mode and V8086 mode are easy. The only snag seems to be that
1072 * CS.limit doesn't change and the limit check is done against the current
1073 * limit.
1074 */
1075 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1076 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1077 {
1078 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1079
1080 /* Check stack first - may #SS(0). */
1081 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1082 &uPtrRet.pv, &uNewRsp);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 /* Check the target address range. */
1087 if (offSeg > UINT32_MAX)
1088 return iemRaiseGeneralProtectionFault0(pIemCpu);
1089
1090 /* Everything is fine, push the return address. */
1091 if (enmEffOpSize == IEMMODE_16BIT)
1092 {
1093 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1094 uPtrRet.pu16[1] = pCtx->cs.Sel;
1095 }
1096 else
1097 {
1098 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1099 uPtrRet.pu16[3] = pCtx->cs.Sel;
1100 }
1101 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1102 if (rcStrict != VINF_SUCCESS)
1103 return rcStrict;
1104
1105 /* Branch. */
1106 pCtx->rip = offSeg;
1107 pCtx->cs.Sel = uSel;
1108 pCtx->cs.ValidSel = uSel;
1109 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1110 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1111 return VINF_SUCCESS;
1112 }
1113
1114 /*
1115 * Protected mode. Need to parse the specified descriptor...
1116 */
1117 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1118 {
1119 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1120 return iemRaiseGeneralProtectionFault0(pIemCpu);
1121 }
1122
1123 /* Fetch the descriptor. */
1124 IEMSELDESC Desc;
1125 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1126 if (rcStrict != VINF_SUCCESS)
1127 return rcStrict;
1128
1129 /*
1130 * Deal with it according to its type. We do the standard code selectors
1131 * here and dispatch the system selectors to worker functions.
1132 */
1133 if (!Desc.Legacy.Gen.u1DescType)
1134 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1135
1136 /* Only code segments. */
1137 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1138 {
1139 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1141 }
1142
1143 /* L vs D. */
1144 if ( Desc.Legacy.Gen.u1Long
1145 && Desc.Legacy.Gen.u1DefBig
1146 && IEM_IS_LONG_MODE(pIemCpu))
1147 {
1148 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151
1152 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1153 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1154 {
1155 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1156 {
1157 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1158 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1159 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1160 }
1161 }
1162 else
1163 {
1164 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1165 {
1166 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1167 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1168 }
1169 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1170 {
1171 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1172 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1173 }
1174 }
1175
1176 /* Is it there? */
1177 if (!Desc.Legacy.Gen.u1Present)
1178 {
1179 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1180 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1181 }
1182
1183 /* Check stack first - may #SS(0). */
1184 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1185 * 16-bit code cause a two or four byte CS to be pushed? */
1186 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1187 enmEffOpSize == IEMMODE_64BIT ? 8+8
1188 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1189 &uPtrRet.pv, &uNewRsp);
1190 if (rcStrict != VINF_SUCCESS)
1191 return rcStrict;
1192
1193 /* Chop the high bits if 16-bit (Intel says so). */
1194 if (enmEffOpSize == IEMMODE_16BIT)
1195 offSeg &= UINT16_MAX;
1196
1197 /* Limit / canonical check. */
1198 uint64_t u64Base;
1199 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1201 {
1202 if (!IEM_IS_CANONICAL(offSeg))
1203 {
1204 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1205 return iemRaiseNotCanonical(pIemCpu);
1206 }
1207 u64Base = 0;
1208 }
1209 else
1210 {
1211 if (offSeg > cbLimit)
1212 {
1213 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1214 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1215 }
1216 u64Base = X86DESC_BASE(&Desc.Legacy);
1217 }
1218
1219 /*
1220 * Now set the accessed bit before
1221 * writing the return address to the stack and committing the result into
1222 * CS, CSHID and RIP.
1223 */
1224 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1225 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1226 {
1227 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1228 if (rcStrict != VINF_SUCCESS)
1229 return rcStrict;
1230 /** @todo check what VT-x and AMD-V does. */
1231 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1232 }
1233
1234 /* stack */
1235 if (enmEffOpSize == IEMMODE_16BIT)
1236 {
1237 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1238 uPtrRet.pu16[1] = pCtx->cs.Sel;
1239 }
1240 else if (enmEffOpSize == IEMMODE_32BIT)
1241 {
1242 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1243 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1244 }
1245 else
1246 {
1247 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1248 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1249 }
1250 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 /* commit */
1255 pCtx->rip = offSeg;
1256 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1257 pCtx->cs.Sel |= pIemCpu->uCpl;
1258 pCtx->cs.ValidSel = pCtx->cs.Sel;
1259 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1260 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1261 pCtx->cs.u32Limit = cbLimit;
1262 pCtx->cs.u64Base = u64Base;
1263 /** @todo check if the hidden bits are loaded correctly for 64-bit
1264 * mode. */
1265 return VINF_SUCCESS;
1266}
1267
1268
1269/**
1270 * Implements retf.
1271 *
1272 * @param enmEffOpSize The effective operand size.
1273 * @param cbPop The number of bytes of arguments to pop
1274 * from the stack.
1275 */
1276IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1277{
1278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1279 VBOXSTRICTRC rcStrict;
1280 RTCPTRUNION uPtrFrame;
1281 uint64_t uNewRsp;
1282 uint64_t uNewRip;
1283 uint16_t uNewCs;
1284 NOREF(cbInstr);
1285
1286 /*
1287 * Read the stack values first.
1288 */
1289 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1290 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1291 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1292 if (rcStrict != VINF_SUCCESS)
1293 return rcStrict;
1294 if (enmEffOpSize == IEMMODE_16BIT)
1295 {
1296 uNewRip = uPtrFrame.pu16[0];
1297 uNewCs = uPtrFrame.pu16[1];
1298 }
1299 else if (enmEffOpSize == IEMMODE_32BIT)
1300 {
1301 uNewRip = uPtrFrame.pu32[0];
1302 uNewCs = uPtrFrame.pu16[2];
1303 }
1304 else
1305 {
1306 uNewRip = uPtrFrame.pu64[0];
1307 uNewCs = uPtrFrame.pu16[4];
1308 }
1309
1310 /*
1311 * Real mode and V8086 mode are easy.
1312 */
1313 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1315 {
1316 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1317 /** @todo check how this is supposed to work if sp=0xfffe. */
1318
1319 /* Check the limit of the new EIP. */
1320 /** @todo Intel pseudo code only does the limit check for 16-bit
1321 * operands, AMD does not make any distinction. What is right? */
1322 if (uNewRip > pCtx->cs.u32Limit)
1323 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1324
1325 /* commit the operation. */
1326 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329 pCtx->rip = uNewRip;
1330 pCtx->cs.Sel = uNewCs;
1331 pCtx->cs.ValidSel = uNewCs;
1332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1333 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1334 /** @todo do we load attribs and limit as well? */
1335 if (cbPop)
1336 iemRegAddToRsp(pCtx, cbPop);
1337 return VINF_SUCCESS;
1338 }
1339
1340 /*
1341 * Protected mode is complicated, of course.
1342 */
1343 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1344 {
1345 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1346 return iemRaiseGeneralProtectionFault0(pIemCpu);
1347 }
1348
1349 /* Fetch the descriptor. */
1350 IEMSELDESC DescCs;
1351 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1352 if (rcStrict != VINF_SUCCESS)
1353 return rcStrict;
1354
1355 /* Can only return to a code selector. */
1356 if ( !DescCs.Legacy.Gen.u1DescType
1357 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1358 {
1359 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1360 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1361 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1362 }
1363
1364 /* L vs D. */
1365 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1366 && DescCs.Legacy.Gen.u1DefBig
1367 && IEM_IS_LONG_MODE(pIemCpu))
1368 {
1369 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1371 }
1372
1373 /* DPL/RPL/CPL checks. */
1374 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1375 {
1376 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1377 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1378 }
1379
1380 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1381 {
1382 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1383 {
1384 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1385 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1387 }
1388 }
1389 else
1390 {
1391 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1392 {
1393 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1394 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1395 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1396 }
1397 }
1398
1399 /* Is it there? */
1400 if (!DescCs.Legacy.Gen.u1Present)
1401 {
1402 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1403 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1404 }
1405
1406 /*
1407 * Return to outer privilege? (We'll typically have entered via a call gate.)
1408 */
1409 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1410 {
1411 /* Read the return pointer, it comes before the parameters. */
1412 RTCPTRUNION uPtrStack;
1413 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1414 if (rcStrict != VINF_SUCCESS)
1415 return rcStrict;
1416 uint16_t uNewOuterSs;
1417 uint64_t uNewOuterRsp;
1418 if (enmEffOpSize == IEMMODE_16BIT)
1419 {
1420 uNewOuterRsp = uPtrFrame.pu16[0];
1421 uNewOuterSs = uPtrFrame.pu16[1];
1422 }
1423 else if (enmEffOpSize == IEMMODE_32BIT)
1424 {
1425 uNewOuterRsp = uPtrFrame.pu32[0];
1426 uNewOuterSs = uPtrFrame.pu16[2];
1427 }
1428 else
1429 {
1430 uNewOuterRsp = uPtrFrame.pu64[0];
1431 uNewOuterSs = uPtrFrame.pu16[4];
1432 }
1433
1434 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1435 and read the selector. */
1436 IEMSELDESC DescSs;
1437 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1438 {
1439 if ( !DescCs.Legacy.Gen.u1Long
1440 || (uNewOuterSs & X86_SEL_RPL) == 3)
1441 {
1442 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1443 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1444 return iemRaiseGeneralProtectionFault0(pIemCpu);
1445 }
1446 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1447 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1448 }
1449 else
1450 {
1451 /* Fetch the descriptor for the new stack segment. */
1452 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1453 if (rcStrict != VINF_SUCCESS)
1454 return rcStrict;
1455 }
1456
1457 /* Check that RPL of stack and code selectors match. */
1458 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1459 {
1460 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* Must be a writable data segment. */
1465 if ( !DescSs.Legacy.Gen.u1DescType
1466 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1467 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1468 {
1469 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1470 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1472 }
1473
1474 /* L vs D. (Not mentioned by Intel.) */
1475 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1476 && DescSs.Legacy.Gen.u1DefBig
1477 && IEM_IS_LONG_MODE(pIemCpu))
1478 {
1479 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1480 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1482 }
1483
1484 /* DPL/RPL/CPL checks. */
1485 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1486 {
1487 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1488 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1489 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1490 }
1491
1492 /* Is it there? */
1493 if (!DescSs.Legacy.Gen.u1Present)
1494 {
1495 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1496 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1497 }
1498
1499 /* Calc SS limit.*/
1500 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1501
1502 /* Is RIP canonical or within CS.limit? */
1503 uint64_t u64Base;
1504 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1505
1506 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1507 {
1508 if (!IEM_IS_CANONICAL(uNewRip))
1509 {
1510 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1511 return iemRaiseNotCanonical(pIemCpu);
1512 }
1513 u64Base = 0;
1514 }
1515 else
1516 {
1517 if (uNewRip > cbLimitCs)
1518 {
1519 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1520 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1521 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1522 }
1523 u64Base = X86DESC_BASE(&DescCs.Legacy);
1524 }
1525
1526 /*
1527 * Now set the accessed bit before
1528 * writing the return address to the stack and committing the result into
1529 * CS, CSHID and RIP.
1530 */
1531 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1532 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1533 {
1534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1535 if (rcStrict != VINF_SUCCESS)
1536 return rcStrict;
1537 /** @todo check what VT-x and AMD-V does. */
1538 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1539 }
1540 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1541 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1542 {
1543 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1544 if (rcStrict != VINF_SUCCESS)
1545 return rcStrict;
1546 /** @todo check what VT-x and AMD-V does. */
1547 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1548 }
1549
1550 /* commit */
1551 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1552 if (rcStrict != VINF_SUCCESS)
1553 return rcStrict;
1554 if (enmEffOpSize == IEMMODE_16BIT)
1555 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1556 else
1557 pCtx->rip = uNewRip;
1558 pCtx->cs.Sel = uNewCs;
1559 pCtx->cs.ValidSel = uNewCs;
1560 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1562 pCtx->cs.u32Limit = cbLimitCs;
1563 pCtx->cs.u64Base = u64Base;
1564 pCtx->rsp = uNewRsp;
1565 pCtx->ss.Sel = uNewOuterSs;
1566 pCtx->ss.ValidSel = uNewOuterSs;
1567 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1568 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1569 pCtx->ss.u32Limit = cbLimitSs;
1570 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1571 pCtx->ss.u64Base = 0;
1572 else
1573 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1574
1575 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1576 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1577 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1578 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1579 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1580
1581 /** @todo check if the hidden bits are loaded correctly for 64-bit
1582 * mode. */
1583
1584 if (cbPop)
1585 iemRegAddToRsp(pCtx, cbPop);
1586
1587 /* Done! */
1588 }
1589 /*
1590 * Return to the same privilege level
1591 */
1592 else
1593 {
1594 /* Limit / canonical check. */
1595 uint64_t u64Base;
1596 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1597
1598 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1599 {
1600 if (!IEM_IS_CANONICAL(uNewRip))
1601 {
1602 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1603 return iemRaiseNotCanonical(pIemCpu);
1604 }
1605 u64Base = 0;
1606 }
1607 else
1608 {
1609 if (uNewRip > cbLimitCs)
1610 {
1611 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1613 }
1614 u64Base = X86DESC_BASE(&DescCs.Legacy);
1615 }
1616
1617 /*
1618 * Now set the accessed bit before
1619 * writing the return address to the stack and committing the result into
1620 * CS, CSHID and RIP.
1621 */
1622 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1623 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1624 {
1625 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1626 if (rcStrict != VINF_SUCCESS)
1627 return rcStrict;
1628 /** @todo check what VT-x and AMD-V does. */
1629 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1630 }
1631
1632 /* commit */
1633 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1634 if (rcStrict != VINF_SUCCESS)
1635 return rcStrict;
1636 if (enmEffOpSize == IEMMODE_16BIT)
1637 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1638 else
1639 pCtx->rip = uNewRip;
1640 pCtx->cs.Sel = uNewCs;
1641 pCtx->cs.ValidSel = uNewCs;
1642 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1643 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1644 pCtx->cs.u32Limit = cbLimitCs;
1645 pCtx->cs.u64Base = u64Base;
1646 /** @todo check if the hidden bits are loaded correctly for 64-bit
1647 * mode. */
1648 if (cbPop)
1649 iemRegAddToRsp(pCtx, cbPop);
1650 }
1651 return VINF_SUCCESS;
1652}
1653
1654
1655/**
1656 * Implements retn.
1657 *
1658 * We're doing this in C because of the \#GP that might be raised if the popped
1659 * program counter is out of bounds.
1660 *
1661 * @param enmEffOpSize The effective operand size.
1662 * @param cbPop The number of bytes of arguments to pop
1663 * from the stack.
1664 */
1665IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1666{
1667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1668 NOREF(cbInstr);
1669
1670 /* Fetch the RSP from the stack. */
1671 VBOXSTRICTRC rcStrict;
1672 RTUINT64U NewRip;
1673 RTUINT64U NewRsp;
1674 NewRsp.u = pCtx->rsp;
1675 switch (enmEffOpSize)
1676 {
1677 case IEMMODE_16BIT:
1678 NewRip.u = 0;
1679 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1680 break;
1681 case IEMMODE_32BIT:
1682 NewRip.u = 0;
1683 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1684 break;
1685 case IEMMODE_64BIT:
1686 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1687 break;
1688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1689 }
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692
1693 /* Check the new RSP before loading it. */
1694 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1695 * of it. The canonical test is performed here and for call. */
1696 if (enmEffOpSize != IEMMODE_64BIT)
1697 {
1698 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1699 {
1700 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1701 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1702 }
1703 }
1704 else
1705 {
1706 if (!IEM_IS_CANONICAL(NewRip.u))
1707 {
1708 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1709 return iemRaiseNotCanonical(pIemCpu);
1710 }
1711 }
1712
1713 /* Commit it. */
1714 pCtx->rip = NewRip.u;
1715 pCtx->rsp = NewRsp.u;
1716 if (cbPop)
1717 iemRegAddToRsp(pCtx, cbPop);
1718
1719 return VINF_SUCCESS;
1720}
1721
1722
1723/**
1724 * Implements enter.
1725 *
1726 * We're doing this in C because the instruction is insane, even for the
1727 * cParameters=0 case, dealing with the stack is tedious.
1728 *
1729 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate (in bytes).
 * @param cParameters The number of parameters / nesting levels to copy (masked to 0..31).
1730 */
1731IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1732{
1733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1734
1735 /* Push RBP, saving the old value in TmpRbp. */
1736 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1737 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1738 RTUINT64U NewRbp;
1739 VBOXSTRICTRC rcStrict;
1740 if (enmEffOpSize == IEMMODE_64BIT)
1741 {
1742 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1743 NewRbp = NewRsp;
1744 }
1745 else if (pCtx->ss.Attr.n.u1DefBig)
1746 {
1747 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1748 NewRbp = NewRsp;
1749 }
1750 else
1751 {
1752 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1753 NewRbp = TmpRbp;
1754 NewRbp.Words.w0 = NewRsp.Words.w0;
1755 }
1756 if (rcStrict != VINF_SUCCESS)
1757 return rcStrict;
1758
1759 /* Copy the enclosing frame pointers - what Intel calls the nesting level (cParameters). */
1760 cParameters &= 0x1f;
1761 if (cParameters > 0)
1762 {
1763 switch (enmEffOpSize)
1764 {
1765 case IEMMODE_16BIT:
1766 if (pCtx->ss.Attr.n.u1DefBig)
1767 TmpRbp.DWords.dw0 -= 2;
1768 else
1769 TmpRbp.Words.w0 -= 2;
1770 do
1771 {
1772 uint16_t u16Tmp;
1773 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1774 if (rcStrict != VINF_SUCCESS)
1775 break;
1776 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1777 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1778 break;
1779
1780 case IEMMODE_32BIT:
1781 if (pCtx->ss.Attr.n.u1DefBig)
1782 TmpRbp.DWords.dw0 -= 4;
1783 else
1784 TmpRbp.Words.w0 -= 4;
1785 do
1786 {
1787 uint32_t u32Tmp;
1788 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1789 if (rcStrict != VINF_SUCCESS)
1790 break;
1791 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1792 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1793 break;
1794
1795 case IEMMODE_64BIT:
1796 TmpRbp.u -= 8;
1797 do
1798 {
1799 uint64_t u64Tmp;
1800 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1801 if (rcStrict != VINF_SUCCESS)
1802 break;
1803 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1804 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1805 break;
1806
1807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1808 }
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811
1812 /* Push the new RBP */
1813 if (enmEffOpSize == IEMMODE_64BIT)
1814 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1815 else if (pCtx->ss.Attr.n.u1DefBig)
1816 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1817 else
1818 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1819 if (rcStrict != VINF_SUCCESS)
1820 return rcStrict;
1821
1822 }
1823
1824 /* Recalc RSP. */
1825 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1826
1827 /** @todo Should probe write access at the new RSP according to AMD. */
1828
1829 /* Commit it. */
1830 pCtx->rbp = NewRbp.u;
1831 pCtx->rsp = NewRsp.u;
1832 iemRegAddToRip(pIemCpu, cbInstr);
1833
1834 return VINF_SUCCESS;
1835}
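#if 0 /* Illustrative sketch: the net effect of a successful 64-bit ENTER cbFrame,0 -
       * the common nesting-level-zero case - on RSP/RBP, using a tiny qword array as
       * stack memory.  MINISTACK64 and miniEnter64Level0 are made-up names; stack
       * faults and the nesting-level copy loop are deliberately left out. */
typedef struct MINISTACK64
{
    uint64_t au64[32];      /* fake stack memory, indexed by rsp / 8 */
    uint64_t rsp;           /* byte offset into au64[], kept 8-byte aligned */
    uint64_t rbp;
} MINISTACK64;

static void miniEnter64Level0(MINISTACK64 *pStk, uint16_t cbFrame)
{
    /* 1. Push the caller's frame pointer. */
    pStk->rsp -= 8;
    pStk->au64[pStk->rsp / 8] = pStk->rbp;

    /* 2. The new frame pointer is RSP after that push... */
    pStk->rbp = pStk->rsp;

    /* 3. ...and the locals live below it. */
    pStk->rsp -= cbFrame;
}
#endif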
1836
1837
1838
1839/**
1840 * Implements leave.
1841 *
1842 * We're doing this in C because messing with the stack registers is annoying
1843 * since they depend on the SS attributes.
1844 *
1845 * @param enmEffOpSize The effective operand size.
1846 */
1847IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1848{
1849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1850
1851 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1852 RTUINT64U NewRsp;
1853 if (pCtx->ss.Attr.n.u1Long)
1854 NewRsp.u = pCtx->rbp;
1855 else if (pCtx->ss.Attr.n.u1DefBig)
1856 NewRsp.u = pCtx->ebp;
1857 else
1858 {
1859 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1860 NewRsp.u = pCtx->rsp;
1861 NewRsp.Words.w0 = pCtx->bp;
1862 }
1863
1864 /* Pop RBP according to the operand size. */
1865 VBOXSTRICTRC rcStrict;
1866 RTUINT64U NewRbp;
1867 switch (enmEffOpSize)
1868 {
1869 case IEMMODE_16BIT:
1870 NewRbp.u = pCtx->rbp;
1871 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1872 break;
1873 case IEMMODE_32BIT:
1874 NewRbp.u = 0;
1875 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1876 break;
1877 case IEMMODE_64BIT:
1878 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1879 break;
1880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1881 }
1882 if (rcStrict != VINF_SUCCESS)
1883 return rcStrict;
1884
1885
1886 /* Commit it. */
1887 pCtx->rbp = NewRbp.u;
1888 pCtx->rsp = NewRsp.u;
1889 iemRegAddToRip(pIemCpu, cbInstr);
1890
1891 return VINF_SUCCESS;
1892}
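#if 0 /* Illustrative sketch: LEAVE is the inverse of ENTER - reload RSP from RBP, then
       * pop the saved frame pointer back into RBP.  Same kind of fault-free 64-bit
       * qword-array stack model as the ENTER sketch above; miniLeave64 is a made-up
       * name for illustration. */
static void miniLeave64(uint64_t au64Stack[], uint64_t *pRsp, uint64_t *pRbp)
{
    *pRsp  = *pRbp;                     /* discard the local variable area */
    *pRbp  = au64Stack[*pRsp / 8];      /* pop the caller's frame pointer */
    *pRsp += 8;
}
#endif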
1893
1894
1895/**
1896 * Implements int3 and int XX.
1897 *
1898 * @param u8Int The interrupt vector number.
1899 * @param fIsBpInstr Whether this is the breakpoint instruction (int3).
1900 */
1901IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1902{
1903 Assert(pIemCpu->cXcptRecursions == 0);
1904 return iemRaiseXcptOrInt(pIemCpu,
1905 cbInstr,
1906 u8Int,
1907 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1908 0,
1909 0);
1910}
1911
1912
1913/**
1914 * Implements iret for real mode and V8086 mode.
1915 *
1916 * @param enmEffOpSize The effective operand size.
1917 */
1918IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1919{
1920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1922 X86EFLAGS Efl;
1923 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
1924 NOREF(cbInstr);
1925
1926 /*
1927 * In V8086 mode, iret raises #GP(0) if VME isn't enabled.
1928 */
1929 if ( pCtx->eflags.Bits.u1VM
1930 && !(pCtx->cr4 & X86_CR4_VME))
1931 return iemRaiseGeneralProtectionFault0(pIemCpu);
1932
1933 /*
1934 * Do the stack bits, but don't commit RSP before everything checks
1935 * out right.
1936 */
1937 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1938 VBOXSTRICTRC rcStrict;
1939 RTCPTRUNION uFrame;
1940 uint16_t uNewCs;
1941 uint32_t uNewEip;
1942 uint32_t uNewFlags;
1943 uint64_t uNewRsp;
1944 if (enmEffOpSize == IEMMODE_32BIT)
1945 {
1946 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1947 if (rcStrict != VINF_SUCCESS)
1948 return rcStrict;
1949 uNewEip = uFrame.pu32[0];
1950 uNewCs = (uint16_t)uFrame.pu32[1];
1951 uNewFlags = uFrame.pu32[2];
1952 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1953 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1954 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1955 | X86_EFL_ID;
1956 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1957 }
1958 else
1959 {
1960 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963 uNewEip = uFrame.pu16[0];
1964 uNewCs = uFrame.pu16[1];
1965 uNewFlags = uFrame.pu16[2];
1966 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1967 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1968 uNewFlags |= Efl.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1969 /** @todo The intel pseudo code does not indicate what happens to
1970 * reserved flags. We just ignore them. */
1971 }
1972 /** @todo Check how this is supposed to work if sp=0xfffe. */
1973
1974 /*
1975 * Check the limit of the new EIP.
1976 */
1977 /** @todo Only the AMD pseudo code checks the limit here; what's
1978 * right? */
1979 if (uNewEip > pCtx->cs.u32Limit)
1980 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1981
1982 /*
1983 * V8086 checks and flag adjustments
1984 */
1985 if (Efl.Bits.u1VM)
1986 {
1987 if (Efl.Bits.u2IOPL == 3)
1988 {
1989 /* Preserve IOPL and clear RF. */
1990 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1991 uNewFlags |= Efl.u & (X86_EFL_IOPL);
1992 }
1993 else if ( enmEffOpSize == IEMMODE_16BIT
1994 && ( !(uNewFlags & X86_EFL_IF)
1995 || !Efl.Bits.u1VIP )
1996 && !(uNewFlags & X86_EFL_TF) )
1997 {
1998 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1999 uNewFlags &= ~X86_EFL_VIF;
2000 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2001 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2002 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2003 }
2004 else
2005 return iemRaiseGeneralProtectionFault0(pIemCpu);
2006 }
2007
2008 /*
2009 * Commit the operation.
2010 */
2011 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2012 if (rcStrict != VINF_SUCCESS)
2013 return rcStrict;
2014 pCtx->rip = uNewEip;
2015 pCtx->cs.Sel = uNewCs;
2016 pCtx->cs.ValidSel = uNewCs;
2017 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2018 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2019 /** @todo do we load attribs and limit as well? */
2020 Assert(uNewFlags & X86_EFL_1);
2021 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2022
2023 return VINF_SUCCESS;
2024}
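#if 0 /* Illustrative sketch: the two IRET stack frame layouts handled above, as plain
       * structs (lowest address first, i.e. the order the fields are popped in).  For
       * illustration only; the real code accesses the frame through the special
       * stack-pop helpers rather than a struct overlay. */
typedef struct IRETFRAME16      /* 6 bytes, 16-bit operand size */
{
    uint16_t uIp;
    uint16_t uCs;
    uint16_t uFlags;            /* only the low 16 EFLAGS bits come off the stack */
} IRETFRAME16;

typedef struct IRETFRAME32      /* 12 bytes, 32-bit operand size */
{
    uint32_t uEip;
    uint32_t uCs;               /* upper 16 bits are ignored */
    uint32_t uEflags;           /* VM/VIF/VIP are taken from the old EFLAGS, not the frame */
} IRETFRAME32;
#endif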
2025
2026
2027/**
2028 * Loads a segment register when entering V8086 mode.
2029 *
2030 * @param pSReg The segment register.
2031 * @param uSeg The segment to load.
2032 */
2033static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2034{
2035 pSReg->Sel = uSeg;
2036 pSReg->ValidSel = uSeg;
2037 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2038 pSReg->u64Base = (uint32_t)uSeg << 4;
2039 pSReg->u32Limit = 0xffff;
2040 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2041 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2042 * IRET'ing to V8086. */
2043}
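#if 0 /* Illustrative sketch: in real and V8086 mode a segment has no descriptor - the
       * base is just the selector shifted left by four, which is essentially all the
       * helper above sets up (plus the fixed 64KB limit and the 0xf3 attributes).
       * The helper name below is made up for illustration. */
static uint32_t miniRealModeLinearAddr(uint16_t uSeg, uint16_t off)
{
    return ((uint32_t)uSeg << 4) + off;     /* e.g. 0xb800:0x0010 -> 0x000b8010 */
}
#endif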
2044
2045
2046/**
2047 * Implements iret for protected mode returning to V8086 mode.
2048 *
2049 * @param pCtx Pointer to the CPU context.
2050 * @param uNewEip The new EIP.
2051 * @param uNewCs The new CS.
2052 * @param uNewFlags The new EFLAGS.
2053 * @param uNewRsp The RSP after the initial IRET frame.
2054 */
2055IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2056 uint32_t, uNewFlags, uint64_t, uNewRsp)
2057{
2058#if 0
2059 if (!LogIs6Enabled())
2060 {
2061 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
2062 RTLogFlags(NULL, "enabled");
2063 return VERR_IEM_RESTART_INSTRUCTION;
2064 }
2065#endif
2066
2067 /*
2068 * Pop the V8086 specific frame bits off the stack.
2069 */
2070 VBOXSTRICTRC rcStrict;
2071 RTCPTRUNION uFrame;
2072 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2073 if (rcStrict != VINF_SUCCESS)
2074 return rcStrict;
2075 uint32_t uNewEsp = uFrame.pu32[0];
2076 uint16_t uNewSs = uFrame.pu32[1];
2077 uint16_t uNewEs = uFrame.pu32[2];
2078 uint16_t uNewDs = uFrame.pu32[3];
2079 uint16_t uNewFs = uFrame.pu32[4];
2080 uint16_t uNewGs = uFrame.pu32[5];
2081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 /*
2086 * Commit the operation.
2087 */
2088 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2089 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2090 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2091 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2092 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2093 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2094 pCtx->rip = uNewEip;
2095 pCtx->rsp = uNewEsp;
2096 pCtx->rflags.u = uNewFlags;
2097 pIemCpu->uCpl = 3;
2098
2099 return VINF_SUCCESS;
2100}
2101
2102
2103/**
2104 * Implements iret for protected mode returning via a nested task.
2105 *
2106 * @param enmEffOpSize The effective operand size.
2107 */
2108IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2109{
2110 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2111}
2112
2113
2114/**
2115 * Implements iret for protected mode.
2116 *
2117 * @param enmEffOpSize The effective operand size.
2118 */
2119IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2120{
2121 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2122 NOREF(cbInstr);
2123
2124 /*
2125 * Nested task return.
2126 */
2127 if (pCtx->eflags.Bits.u1NT)
2128 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2129
2130 /*
2131 * Normal return.
2132 *
2133 * Do the stack bits, but don't commit RSP before everything checks
2134 * out right.
2135 */
2136 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2137 VBOXSTRICTRC rcStrict;
2138 RTCPTRUNION uFrame;
2139 uint16_t uNewCs;
2140 uint32_t uNewEip;
2141 uint32_t uNewFlags;
2142 uint64_t uNewRsp;
2143 if (enmEffOpSize == IEMMODE_32BIT)
2144 {
2145 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 uNewEip = uFrame.pu32[0];
2149 uNewCs = (uint16_t)uFrame.pu32[1];
2150 uNewFlags = uFrame.pu32[2];
2151 }
2152 else
2153 {
2154 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157 uNewEip = uFrame.pu16[0];
2158 uNewCs = uFrame.pu16[1];
2159 uNewFlags = uFrame.pu16[2];
2160 }
2161 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2162 if (rcStrict != VINF_SUCCESS)
2163 return rcStrict;
2164
2165 /*
2166 * We're hopefully not returning to V8086 mode...
2167 */
2168 if ( (uNewFlags & X86_EFL_VM)
2169 && pIemCpu->uCpl == 0)
2170 {
2171 Assert(enmEffOpSize == IEMMODE_32BIT);
2172 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
2173 }
2174
2175 /*
2176 * Protected mode.
2177 */
2178 /* Read the CS descriptor. */
2179 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2180 {
2181 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2182 return iemRaiseGeneralProtectionFault0(pIemCpu);
2183 }
2184
2185 IEMSELDESC DescCS;
2186 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2187 if (rcStrict != VINF_SUCCESS)
2188 {
2189 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2190 return rcStrict;
2191 }
2192
2193 /* Must be a code descriptor. */
2194 if (!DescCS.Legacy.Gen.u1DescType)
2195 {
2196 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2198 }
2199 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2200 {
2201 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2203 }
2204
2205 /* Privilege checks. */
2206 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2207 {
2208 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2210 }
2211 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2212 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2213 {
2214 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2215 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2216 }
2217
2218 /* Present? */
2219 if (!DescCS.Legacy.Gen.u1Present)
2220 {
2221 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2222 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2223 }
2224
2225 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2226
2227 /*
2228 * Return to outer level?
2229 */
2230 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2231 {
2232 uint16_t uNewSS;
2233 uint32_t uNewESP;
2234 if (enmEffOpSize == IEMMODE_32BIT)
2235 {
2236 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2237 if (rcStrict != VINF_SUCCESS)
2238 return rcStrict;
2239 uNewESP = uFrame.pu32[0];
2240 uNewSS = (uint16_t)uFrame.pu32[1];
2241 }
2242 else
2243 {
2244 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2245 if (rcStrict != VINF_SUCCESS)
2246 return rcStrict;
2247 uNewESP = uFrame.pu16[0];
2248 uNewSS = uFrame.pu16[1];
2249 }
2250 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Read the SS descriptor. */
2255 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2256 {
2257 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2258 return iemRaiseGeneralProtectionFault0(pIemCpu);
2259 }
2260
2261 IEMSELDESC DescSS;
2262 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2263 if (rcStrict != VINF_SUCCESS)
2264 {
2265 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2266 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2267 return rcStrict;
2268 }
2269
2270 /* Privilege checks. */
2271 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2272 {
2273 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2274 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2275 }
2276 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2277 {
2278 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2279 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2280 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2281 }
2282
2283 /* Must be a writeable data segment descriptor. */
2284 if (!DescSS.Legacy.Gen.u1DescType)
2285 {
2286 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2287 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2288 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2289 }
2290 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2291 {
2292 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2293 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2294 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2295 }
2296
2297 /* Present? */
2298 if (!DescSS.Legacy.Gen.u1Present)
2299 {
2300 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2301 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2302 }
2303
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2305
2306 /* Check EIP. */
2307 if (uNewEip > cbLimitCS)
2308 {
2309 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2310 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2311 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2312 }
2313
2314 /*
2315 * Commit the changes, marking CS and SS accessed first since
2316 * that may fail.
2317 */
2318 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2319 {
2320 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2321 if (rcStrict != VINF_SUCCESS)
2322 return rcStrict;
2323 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2324 }
2325 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2326 {
2327 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2328 if (rcStrict != VINF_SUCCESS)
2329 return rcStrict;
2330 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2331 }
2332
2333 pCtx->rip = uNewEip;
2334 pCtx->cs.Sel = uNewCs;
2335 pCtx->cs.ValidSel = uNewCs;
2336 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2337 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2338 pCtx->cs.u32Limit = cbLimitCS;
2339 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2340 pCtx->rsp = uNewESP;
2341 pCtx->ss.Sel = uNewSS;
2342 pCtx->ss.ValidSel = uNewSS;
2343 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2344 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2345 pCtx->ss.u32Limit = cbLimitSs;
2346 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2347
2348 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2349 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2350 if (enmEffOpSize != IEMMODE_16BIT)
2351 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2352 if (pIemCpu->uCpl == 0)
2353 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2354 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2355 fEFlagsMask |= X86_EFL_IF;
2356 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2357 fEFlagsNew &= ~fEFlagsMask;
2358 fEFlagsNew |= uNewFlags & fEFlagsMask;
2359 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2360
2361 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2362 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2363 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2364 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2365 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2366
2367 /* Done! */
2368
2369 }
2370 /*
2371 * Return to the same level.
2372 */
2373 else
2374 {
2375 /* Check EIP. */
2376 if (uNewEip > cbLimitCS)
2377 {
2378 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2379 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2380 }
2381
2382 /*
2383 * Commit the changes, marking CS first since it may fail.
2384 */
2385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2386 {
2387 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2388 if (rcStrict != VINF_SUCCESS)
2389 return rcStrict;
2390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2391 }
2392
2393 pCtx->rip = uNewEip;
2394 pCtx->cs.Sel = uNewCs;
2395 pCtx->cs.ValidSel = uNewCs;
2396 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2398 pCtx->cs.u32Limit = cbLimitCS;
2399 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2400 pCtx->rsp = uNewRsp;
2401
2402 X86EFLAGS NewEfl;
2403 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2404 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2405 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2406 if (enmEffOpSize != IEMMODE_16BIT)
2407 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2408 if (pIemCpu->uCpl == 0)
2409 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2410 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
2411 fEFlagsMask |= X86_EFL_IF;
2412 NewEfl.u &= ~fEFlagsMask;
2413 NewEfl.u |= fEFlagsMask & uNewFlags;
2414 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
2415 /* Done! */
2416 }
2417 return VINF_SUCCESS;
2418}
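#if 0 /* Illustrative sketch: the EFLAGS-merge mask built twice above (same-level and
       * outer-level return), pulled out into a standalone helper so the privilege
       * rules are easier to see.  fIs16BitFrame/uCpl/uIopl are made-up parameter
       * names for illustration. */
static uint32_t miniIretProtEflMask(bool fIs16BitFrame, uint8_t uCpl, uint8_t uIopl)
{
    /* Arithmetic flags, TF, DF and NT can always be changed by IRET. */
    uint32_t fMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                   | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
    if (!fIs16BitFrame)
        fMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;  /* only present in a 32-bit frame */
    if (uCpl == 0)
        fMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP;
    else if (uCpl <= uIopl)
        fMask |= X86_EFL_IF;                            /* sufficiently privileged code may toggle IF */
    return fMask;
}
#endif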
2419
2420
2421/**
2422 * Implements iret for long mode.
2423 *
2424 * @param enmEffOpSize The effective operand size.
2425 */
2426IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2427{
2428 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2429 NOREF(cbInstr);
2430
2431 /*
2432 * Nested task return is not supported in long mode.
2433 */
2434 if (pCtx->eflags.Bits.u1NT)
2435 {
2436 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
2437 return iemRaiseGeneralProtectionFault0(pIemCpu);
2438 }
2439
2440 /*
2441 * Normal return.
2442 *
2443 * Do the stack bits, but don't commit RSP before everything checks
2444 * out right.
2445 */
2446 VBOXSTRICTRC rcStrict;
2447 RTCPTRUNION uFrame;
2448 uint64_t uNewRip;
2449 uint16_t uNewCs;
2450 uint16_t uNewSs;
2451 uint32_t uNewFlags;
2452 uint64_t uNewRsp;
2453 if (enmEffOpSize == IEMMODE_64BIT)
2454 {
2455 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
2456 if (rcStrict != VINF_SUCCESS)
2457 return rcStrict;
2458 uNewRip = uFrame.pu64[0];
2459 uNewCs = (uint16_t)uFrame.pu64[1];
2460 uNewFlags = (uint32_t)uFrame.pu64[2];
2461 uNewRsp = uFrame.pu64[3];
2462 uNewSs = (uint16_t)uFrame.pu64[4];
2463 }
2464 else if (enmEffOpSize == IEMMODE_32BIT)
2465 {
2466 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
2467 if (rcStrict != VINF_SUCCESS)
2468 return rcStrict;
2469 uNewRip = uFrame.pu32[0];
2470 uNewCs = (uint16_t)uFrame.pu32[1];
2471 uNewFlags = uFrame.pu32[2];
2472 uNewRsp = uFrame.pu32[3];
2473 uNewSs = (uint16_t)uFrame.pu32[4];
2474 }
2475 else
2476 {
2477 Assert(enmEffOpSize == IEMMODE_16BIT);
2478 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
2479 if (rcStrict != VINF_SUCCESS)
2480 return rcStrict;
2481 uNewRip = uFrame.pu16[0];
2482 uNewCs = uFrame.pu16[1];
2483 uNewFlags = uFrame.pu16[2];
2484 uNewRsp = uFrame.pu16[3];
2485 uNewSs = uFrame.pu16[4];
2486 }
2487 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2488 if (rcStrict != VINF_SUCCESS)
2489 return rcStrict;
2490
2491 /*
2492 * Check stuff.
2493 */
2494 /* Read the CS descriptor. */
2495 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2496 {
2497 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2498 return iemRaiseGeneralProtectionFault0(pIemCpu);
2499 }
2500
2501 IEMSELDESC DescCS;
2502 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2503 if (rcStrict != VINF_SUCCESS)
2504 {
2505 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
2506 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
2507 return rcStrict;
2508 }
2509
2510 /* Must be a code descriptor. */
2511 if ( !DescCS.Legacy.Gen.u1DescType
2512 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2513 {
2514 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
2515 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2516 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2517 }
2518
2519 /* Privilege checks. */
2520 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
2521 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2522 {
2523 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl));
2524 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2525 }
2526 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2527 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2528 {
2529 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n",
2530 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl));
2531 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2532 }
2533
2534 /* Present? */
2535 if (!DescCS.Legacy.Gen.u1Present)
2536 {
2537 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2538 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2539 }
2540
2541 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2542
2543 /* Read the SS descriptor. */
2544 IEMSELDESC DescSS;
2545 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2546 {
2547 if ( !DescCS.Legacy.Gen.u1Long
2548 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
2549 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
2550 {
2551 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2552 return iemRaiseGeneralProtectionFault0(pIemCpu);
2553 }
2554 DescSS.Legacy.u = 0;
2555 }
2556 else
2557 {
2558 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs);
2559 if (rcStrict != VINF_SUCCESS)
2560 {
2561 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
2562 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
2563 return rcStrict;
2564 }
2565 }
2566
2567 /* Privilege checks. */
2568 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2569 {
2570 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2571 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2572 }
2573
2574 uint32_t cbLimitSs;
2575 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2576 cbLimitSs = UINT32_MAX;
2577 else
2578 {
2579 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2580 {
2581 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
2582 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
2583 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2584 }
2585
2586 /* Must be a writeable data segment descriptor. */
2587 if (!DescSS.Legacy.Gen.u1DescType)
2588 {
2589 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
2590 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
2591 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2592 }
2593 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2594 {
2595 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
2596 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
2597 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
2598 }
2599
2600 /* Present? */
2601 if (!DescSS.Legacy.Gen.u1Present)
2602 {
2603 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
2604 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
2605 }
2606 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2607 }
2608
2609 /* Check EIP. */
2610 if (DescCS.Legacy.Gen.u1Long)
2611 {
2612 if (!IEM_IS_CANONICAL(uNewRip))
2613 {
2614 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
2615 uNewCs, uNewRip, uNewSs, uNewRsp));
2616 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2617 }
2618 }
2619 else
2620 {
2621 if (uNewRip > cbLimitCS)
2622 {
2623 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
2624 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
2625 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2626 }
2627 }
2628
2629 /*
2630 * Commit the changes, marking CS and SS accessed first since
2631 * that may fail.
2632 */
2633 /** @todo where exactly are these actually marked accessed by a real CPU? */
2634 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2635 {
2636 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2637 if (rcStrict != VINF_SUCCESS)
2638 return rcStrict;
2639 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2640 }
2641 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2642 {
2643 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
2644 if (rcStrict != VINF_SUCCESS)
2645 return rcStrict;
2646 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2647 }
2648
2649 pCtx->rip = uNewRip;
2650 pCtx->cs.Sel = uNewCs;
2651 pCtx->cs.ValidSel = uNewCs;
2652 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2653 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2654 pCtx->cs.u32Limit = cbLimitCS;
2655 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2656 pCtx->rsp = uNewRsp;
2657 pCtx->ss.Sel = uNewSs;
2658 pCtx->ss.ValidSel = uNewSs;
2659 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
2660 {
2661 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2662 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
2663 pCtx->ss.u32Limit = UINT32_MAX;
2664 pCtx->ss.u64Base = 0;
2665 }
2666 else
2667 {
2668 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2669 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2670 pCtx->ss.u32Limit = cbLimitSs;
2671 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2672 }
2673
2674 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2675 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2676 if (enmEffOpSize != IEMMODE_16BIT)
2677 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2678 if (pIemCpu->uCpl == 0)
2679 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
2680 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2681 fEFlagsMask |= X86_EFL_IF;
2682 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2683 fEFlagsNew &= ~fEFlagsMask;
2684 fEFlagsNew |= uNewFlags & fEFlagsMask;
2685 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2686
2687 if (pIemCpu->uCpl != uNewCpl)
2688 {
2689 pIemCpu->uCpl = uNewCpl;
2690 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
2691 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
2692 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
2693 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
2694 }
2695
2696 return VINF_SUCCESS;
2697}
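#if 0 /* Illustrative sketch: the five-slot frame IRET pops in long mode, shown for the
       * 64-bit operand size (each slot is a qword; with 32/16-bit operand sizes the
       * slots shrink accordingly, as in the code above).  Plain struct for
       * illustration only. */
typedef struct IRETFRAME64      /* 40 bytes, lowest address first */
{
    uint64_t uRip;
    uint64_t uCs;               /* only the low 16 bits are the selector */
    uint64_t uRFlags;
    uint64_t uRsp;              /* unlike a same-level legacy IRET, SS:RSP is always popped */
    uint64_t uSs;               /* only the low 16 bits are the selector */
} IRETFRAME64;
#endif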
2698
2699
2700/**
2701 * Implements iret.
2702 *
2703 * @param enmEffOpSize The effective operand size.
2704 */
2705IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2706{
2707 /*
2708 * Call a mode specific worker.
2709 */
2710 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2711 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2712 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2713 if (IEM_IS_LONG_MODE(pIemCpu))
2714 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2715
2716 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2717}
2718
2719
2720/**
2721 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2722 *
2723 * @param iSegReg The segment register number (valid).
2724 * @param uSel The new selector value.
2725 */
2726IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2727{
2728 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2729 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2730 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2731
2732 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2733
2734 /*
2735 * Real mode and V8086 mode are easy.
2736 */
2737 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2738 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2739 {
2740 *pSel = uSel;
2741 pHid->u64Base = (uint32_t)uSel << 4;
2742 pHid->ValidSel = uSel;
2743 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2744#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2745 /** @todo Does the CPU actually load limits and attributes in the
2746 * real/V8086 mode segment load case? It doesn't for CS in far
2747 * jumps... Affects unreal mode. */
2748 pHid->u32Limit = 0xffff;
2749 pHid->Attr.u = 0;
2750 pHid->Attr.n.u1Present = 1;
2751 pHid->Attr.n.u1DescType = 1;
2752 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2753 ? X86_SEL_TYPE_RW
2754 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2755#endif
2756 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2757 iemRegAddToRip(pIemCpu, cbInstr);
2758 return VINF_SUCCESS;
2759 }
2760
2761 /*
2762 * Protected mode.
2763 *
2764 * Check if it's a null segment selector value first, that's OK for DS, ES,
2765 * FS and GS. If not null, then we have to load and parse the descriptor.
2766 */
2767 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2768 {
2769 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
2770 if (iSegReg == X86_SREG_SS)
2771 {
2772 /* In 64-bit kernel mode, the stack can be 0 because of the way
2773 interrupts are dispatched. AMD seems to have a slightly more
2774 relaxed relationship to SS.RPL than Intel does. */
2775 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
2776 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2777 || pIemCpu->uCpl > 2
2778 || ( uSel != pIemCpu->uCpl
2779 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
2780 {
2781 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
2782 return iemRaiseGeneralProtectionFault0(pIemCpu);
2783 }
2784 }
2785
2786 *pSel = uSel; /* Not RPL, remember :-) */
2787 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2788 if (iSegReg == X86_SREG_SS)
2789 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
2790
2791 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2792 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2793
2794 iemRegAddToRip(pIemCpu, cbInstr);
2795 return VINF_SUCCESS;
2796 }
2797
2798 /* Fetch the descriptor. */
2799 IEMSELDESC Desc;
2800 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2801 if (rcStrict != VINF_SUCCESS)
2802 return rcStrict;
2803
2804 /* Check GPs first. */
2805 if (!Desc.Legacy.Gen.u1DescType)
2806 {
2807 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2808 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2809 }
2810 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2811 {
2812 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2813 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2814 {
2815 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2816 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2817 }
2818 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2819 {
2820 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2821 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2822 }
2823 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2824 {
2825 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2826 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2827 }
2828 }
2829 else
2830 {
2831 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2832 {
2833 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2834 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2835 }
2836 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2837 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2838 {
2839#if 0 /* this is what intel says. */
2840 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2841 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2842 {
2843 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2844 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2845 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2846 }
2847#else /* this is what makes more sense. */
2848 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2849 {
2850 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2851 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2852 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2853 }
2854 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2855 {
2856 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2857 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2858 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2859 }
2860#endif
2861 }
2862 }
2863
2864 /* Is it there? */
2865 if (!Desc.Legacy.Gen.u1Present)
2866 {
2867 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2868 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2869 }
2870
2871 /* The base and limit. */
2872 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2873 uint64_t u64Base;
2874 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2875 && iSegReg < X86_SREG_FS)
2876 u64Base = 0;
2877 else
2878 u64Base = X86DESC_BASE(&Desc.Legacy);
2879
2880 /*
2881 * Ok, everything checked out fine. Now set the accessed bit before
2882 * committing the result into the registers.
2883 */
2884 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2885 {
2886 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2887 if (rcStrict != VINF_SUCCESS)
2888 return rcStrict;
2889 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2890 }
2891
2892 /* commit */
2893 *pSel = uSel;
2894 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2895 pHid->u32Limit = cbLimit;
2896 pHid->u64Base = u64Base;
2897 pHid->ValidSel = uSel;
2898 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2899
2900 /** @todo check if the hidden bits are loaded correctly for 64-bit
2901 * mode. */
2902 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2903
2904 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2905 iemRegAddToRip(pIemCpu, cbInstr);
2906 return VINF_SUCCESS;
2907}
2908
2909
2910/**
2911 * Implements 'mov SReg, r/m'.
2912 *
2913 * @param iSegReg The segment register number (valid).
2914 * @param uSel The new selector value.
2915 */
2916IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2917{
2918 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2919 if (rcStrict == VINF_SUCCESS)
2920 {
2921 if (iSegReg == X86_SREG_SS)
2922 {
2923 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2924 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2925 }
2926 }
2927 return rcStrict;
2928}
2929
2930
2931/**
2932 * Implements 'pop SReg'.
2933 *
2934 * @param iSegReg The segment register number (valid).
2935 * @param enmEffOpSize The effective operand size (valid).
2936 */
2937IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2938{
2939 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2940 VBOXSTRICTRC rcStrict;
2941
2942 /*
2943 * Read the selector off the stack and join paths with mov ss, reg.
2944 */
2945 RTUINT64U TmpRsp;
2946 TmpRsp.u = pCtx->rsp;
2947 switch (enmEffOpSize)
2948 {
2949 case IEMMODE_16BIT:
2950 {
2951 uint16_t uSel;
2952 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2953 if (rcStrict == VINF_SUCCESS)
2954 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2955 break;
2956 }
2957
2958 case IEMMODE_32BIT:
2959 {
2960 uint32_t u32Value;
2961 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2962 if (rcStrict == VINF_SUCCESS)
2963 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2964 break;
2965 }
2966
2967 case IEMMODE_64BIT:
2968 {
2969 uint64_t u64Value;
2970 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2971 if (rcStrict == VINF_SUCCESS)
2972 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2973 break;
2974 }
2975 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2976 }
2977
2978 /*
2979 * Commit the stack on success.
2980 */
2981 if (rcStrict == VINF_SUCCESS)
2982 {
2983 pCtx->rsp = TmpRsp.u;
2984 if (iSegReg == X86_SREG_SS)
2985 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2986 }
2987 return rcStrict;
2988}
2989
2990
2991/**
2992 * Implements lgs, lfs, les, lds & lss.
2993 */
2994IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2995 uint16_t, uSel,
2996 uint64_t, offSeg,
2997 uint8_t, iSegReg,
2998 uint8_t, iGReg,
2999 IEMMODE, enmEffOpSize)
3000{
3001 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3002 VBOXSTRICTRC rcStrict;
3003
3004 /*
3005 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
3006 */
3007 /** @todo verify and test that mov, pop and lXs perform the segment
3008 * register loading in exactly the same way. */
3009 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
3010 if (rcStrict == VINF_SUCCESS)
3011 {
3012 switch (enmEffOpSize)
3013 {
3014 case IEMMODE_16BIT:
3015 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3016 break;
3017 case IEMMODE_32BIT:
3018 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg; /* stored as 64 bits to match the zero-extension of a 32-bit register write */
3019 break;
3020 case IEMMODE_64BIT:
3021 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
3022 break;
3023 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3024 }
3025 }
3026
3027 return rcStrict;
3028}
3029
3030
3031/**
3032 * Implements lgdt.
3033 *
3034 * @param iEffSeg The segment of the new gdtr contents.
3035 * @param GCPtrEffSrc The address of the new gdtr contents.
3036 * @param enmEffOpSize The effective operand size.
3037 */
3038IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
3039{
3040 if (pIemCpu->uCpl != 0)
3041 return iemRaiseGeneralProtectionFault0(pIemCpu);
3042 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3043
3044 /*
3045 * Fetch the limit and base address.
3046 */
3047 uint16_t cbLimit;
3048 RTGCPTR GCPtrBase;
3049 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
3050 if (rcStrict == VINF_SUCCESS)
3051 {
3052 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3053 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
3054 else
3055 {
3056 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3057 pCtx->gdtr.cbGdt = cbLimit;
3058 pCtx->gdtr.pGdt = GCPtrBase;
3059 }
3060 if (rcStrict == VINF_SUCCESS)
3061 iemRegAddToRip(pIemCpu, cbInstr);
3062 }
3063 return rcStrict;
3064}
3065
3066
3067/**
3068 * Implements sgdt.
3069 *
3070 * @param iEffSeg The segment where to store the gdtr content.
3071 * @param GCPtrEffDst The address where to store the gdtr content.
3072 * @param enmEffOpSize The effective operand size.
3073 */
3074IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
3075{
3076 /*
3077 * Join paths with sidt.
3078 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
3079 * you really must know.
3080 */
3081 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3082 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
3083 if (rcStrict == VINF_SUCCESS)
3084 iemRegAddToRip(pIemCpu, cbInstr);
3085 return rcStrict;
3086}
3087
3088
3089/**
3090 * Implements lidt.
3091 *
3092 * @param iEffSeg The segment of the new idtr contents.
3093 * @param GCPtrEffSrc The address of the new idtr contents.
3094 * @param enmEffOpSize The effective operand size.
3095 */
3096IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
3097{
3098 if (pIemCpu->uCpl != 0)
3099 return iemRaiseGeneralProtectionFault0(pIemCpu);
3100 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3101
3102 /*
3103 * Fetch the limit and base address.
3104 */
3105 uint16_t cbLimit;
3106 RTGCPTR GCPtrBase;
3107 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
3108 if (rcStrict == VINF_SUCCESS)
3109 {
3110 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3111 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
3112 else
3113 {
3114 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3115 pCtx->idtr.cbIdt = cbLimit;
3116 pCtx->idtr.pIdt = GCPtrBase;
3117 }
3118 iemRegAddToRip(pIemCpu, cbInstr);
3119 }
3120 return rcStrict;
3121}
3122
3123
3124/**
3125 * Implements sidt.
3126 *
3127 * @param iEffSeg The segment where to store the idtr content.
3128 * @param GCPtrEffDst The address where to store the idtr content.
3129 * @param enmEffOpSize The effective operand size.
3130 */
3131IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
3132{
3133 /*
3134 * Join paths with sgdt.
3135 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
3136 * you really must know.
3137 */
3138 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3139 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
3140 if (rcStrict == VINF_SUCCESS)
3141 iemRegAddToRip(pIemCpu, cbInstr);
3142 return rcStrict;
3143}
3144
3145
3146/**
3147 * Implements lldt.
3148 *
3149 * @param uNewLdt The new LDT selector value.
3150 */
3151IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
3152{
3153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3154
3155 /*
3156 * Check preconditions.
3157 */
3158 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3159 {
3160 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
3161 return iemRaiseUndefinedOpcode(pIemCpu);
3162 }
3163 if (pIemCpu->uCpl != 0)
3164 {
3165 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
3166 return iemRaiseGeneralProtectionFault0(pIemCpu);
3167 }
3168 if (uNewLdt & X86_SEL_LDT)
3169 {
3170 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
3171 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
3172 }
3173
3174 /*
3175 * Now, loading a NULL selector is easy.
3176 */
3177 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3178 {
3179 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
3180 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3181 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
3182 else
3183 pCtx->ldtr.Sel = uNewLdt;
3184 pCtx->ldtr.ValidSel = uNewLdt;
3185 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3186 if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu))
3187 pCtx->ldtr.Attr.u = 0;
3188 else
3189 {
3190 pCtx->ldtr.u64Base = 0;
3191 pCtx->ldtr.u32Limit = 0;
3192 }
3193
3194 iemRegAddToRip(pIemCpu, cbInstr);
3195 return VINF_SUCCESS;
3196 }
3197
3198 /*
3199 * Read the descriptor.
3200 */
3201 IEMSELDESC Desc;
3202 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
3203 if (rcStrict != VINF_SUCCESS)
3204 return rcStrict;
3205
3206 /* Check GPs first. */
3207 if (Desc.Legacy.Gen.u1DescType)
3208 {
3209 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
3210 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3211 }
3212 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3213 {
3214 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
3215 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3216 }
3217 uint64_t u64Base;
3218 if (!IEM_IS_LONG_MODE(pIemCpu))
3219 u64Base = X86DESC_BASE(&Desc.Legacy);
3220 else
3221 {
3222 if (Desc.Long.Gen.u5Zeros)
3223 {
3224 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
3225 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3226 }
3227
3228 u64Base = X86DESC64_BASE(&Desc.Long);
3229 if (!IEM_IS_CANONICAL(u64Base))
3230 {
3231 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
3232 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3233 }
3234 }
3235
3236 /* NP */
3237 if (!Desc.Legacy.Gen.u1Present)
3238 {
3239 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
3240 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
3241 }
3242
3243 /*
3244 * It checks out alright, update the registers.
3245 */
3246/** @todo check if the actual value is loaded or if the RPL is dropped */
3247 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3248 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
3249 else
3250 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3251 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3252 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3253 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3254 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3255 pCtx->ldtr.u64Base = u64Base;
3256
3257 iemRegAddToRip(pIemCpu, cbInstr);
3258 return VINF_SUCCESS;
3259}
3260
3261
3262/**
3263 * Implements ltr.
3264 *
3265 * @param uNewTr The new task register (TSS) selector value.
3266 */
3267IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
3268{
3269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3270
3271 /*
3272 * Check preconditions.
3273 */
3274 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3275 {
3276 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
3277 return iemRaiseUndefinedOpcode(pIemCpu);
3278 }
3279 if (pIemCpu->uCpl != 0)
3280 {
3281 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
3282 return iemRaiseGeneralProtectionFault0(pIemCpu);
3283 }
3284 if (uNewTr & X86_SEL_LDT)
3285 {
3286 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
3287 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
3288 }
3289 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
3290 {
3291 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
3292 return iemRaiseGeneralProtectionFault0(pIemCpu);
3293 }
3294
3295 /*
3296 * Read the descriptor.
3297 */
3298 IEMSELDESC Desc;
3299 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
3300 if (rcStrict != VINF_SUCCESS)
3301 return rcStrict;
3302
3303 /* Check GPs first. */
3304 if (Desc.Legacy.Gen.u1DescType)
3305 {
3306 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3307 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3308 }
3309 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
3310 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3311 || IEM_IS_LONG_MODE(pIemCpu)) )
3312 {
3313 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3314 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3315 }
3316 uint64_t u64Base;
3317 if (!IEM_IS_LONG_MODE(pIemCpu))
3318 u64Base = X86DESC_BASE(&Desc.Legacy);
3319 else
3320 {
3321 if (Desc.Long.Gen.u5Zeros)
3322 {
3323 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
3324 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3325 }
3326
3327 u64Base = X86DESC64_BASE(&Desc.Long);
3328 if (!IEM_IS_CANONICAL(u64Base))
3329 {
3330 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
3331 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3332 }
3333 }
3334
3335 /* NP */
3336 if (!Desc.Legacy.Gen.u1Present)
3337 {
3338 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3339 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3340 }
3341
3342 /*
3343 * Set it busy.
3344 * Note! Intel says this should lock down the whole descriptor, but we'll
3345 * restrict ourselves to 32-bit for now due to lack of inline
3346 * assembly and such.
3347 */
3348 void *pvDesc;
3349 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3350 if (rcStrict != VINF_SUCCESS)
3351 return rcStrict;
3352 switch ((uintptr_t)pvDesc & 3)
3353 {
3354 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3355 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3356 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3357 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
3358 }
3359 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3360 if (rcStrict != VINF_SUCCESS)
3361 return rcStrict;
3362 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3363
3364 /*
3365 * It checks out alright, update the registers.
3366 */
3367/** @todo check if the actual value is loaded or if the RPL is dropped */
3368 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3369 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3370 else
3371 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3372 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3373 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3374 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3375 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3376 pCtx->tr.u64Base = u64Base;
3377
3378 iemRegAddToRip(pIemCpu, cbInstr);
3379 return VINF_SUCCESS;
3380}
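#if 0 /* Illustrative sketch: the "busy" flag set above is bit 1 of the descriptor type
       * field, i.e. bit 41 of the 8-byte descriptor (byte 5, mask 0x02).  The switch on
       * the low pointer bits in the real code is there so the atomic bit operation gets
       * a 32-bit aligned base address; a plain non-atomic equivalent looks like this
       * (miniSetTssBusy is a made-up name for illustration). */
static void miniSetTssBusy(uint8_t pbDesc[8])
{
    pbDesc[5] |= 0x02;      /* bit 41 = byte 5, bit 1: TSS available -> TSS busy */
}
#endif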
3381
3382
3383/**
3384 * Implements mov GReg,CRx.
3385 *
3386 * @param iGReg The general register to store the CRx value in.
3387 * @param iCrReg The CRx register to read (valid).
3388 */
3389IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3390{
3391 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3392 if (pIemCpu->uCpl != 0)
3393 return iemRaiseGeneralProtectionFault0(pIemCpu);
3394 Assert(!pCtx->eflags.Bits.u1VM);
3395
3396 /* read it */
3397 uint64_t crX;
3398 switch (iCrReg)
3399 {
3400 case 0: crX = pCtx->cr0; break;
3401 case 2: crX = pCtx->cr2; break;
3402 case 3: crX = pCtx->cr3; break;
3403 case 4: crX = pCtx->cr4; break;
3404 case 8:
3405 {
3406 uint8_t uTpr;
3407 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
3408 if (RT_SUCCESS(rc))
3409 crX = uTpr >> 4;
3410 else
3411 crX = 0;
3412 break;
3413 }
3414 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3415 }
3416
3417 /* store it */
3418 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3419 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3420 else
3421 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3422
3423 iemRegAddToRip(pIemCpu, cbInstr);
3424 return VINF_SUCCESS;
3425}
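#if 0 /* Illustrative sketch: CR8 is defined as the upper four bits of the local APIC
       * task-priority register, which is why the TPR value read above is shifted right
       * by four (and a CR8 write would shift left by four again).  Made-up helper names
       * for illustration. */
static uint8_t miniTprToCr8(uint8_t uTpr)
{
    return uTpr >> 4;
}

static uint8_t miniCr8ToTpr(uint8_t uCr8)
{
    return (uint8_t)(uCr8 << 4);
}
#endif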
3426
3427
3428/**
3429 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3430 *
3431 * @param iCrReg The CRx register to write (valid).
3432 * @param uNewCrX The new value.
3433 */
3434IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3435{
3436 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3437 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3438 VBOXSTRICTRC rcStrict;
3439 int rc;
3440
3441 /*
3442 * Try store it.
3443 * Unfortunately, CPUM only does a tiny bit of the work.
3444 */
3445 switch (iCrReg)
3446 {
3447 case 0:
3448 {
3449 /*
3450 * Perform checks.
3451 */
3452 uint64_t const uOldCrX = pCtx->cr0;
3453 uNewCrX |= X86_CR0_ET; /* hardcoded */
3454
3455 /* Check for reserved bits. */
3456 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3457 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3458 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3459 if (uNewCrX & ~(uint64_t)fValid)
3460 {
3461 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3462 return iemRaiseGeneralProtectionFault0(pIemCpu);
3463 }
3464
3465 /* Check for invalid combinations. */
3466 if ( (uNewCrX & X86_CR0_PG)
3467 && !(uNewCrX & X86_CR0_PE) )
3468 {
3469 Log(("Trying to set CR0.PG without CR0.PE\n"));
3470 return iemRaiseGeneralProtectionFault0(pIemCpu);
3471 }
3472
3473 if ( !(uNewCrX & X86_CR0_CD)
3474 && (uNewCrX & X86_CR0_NW) )
3475 {
3476 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3477 return iemRaiseGeneralProtectionFault0(pIemCpu);
3478 }
3479
3480 /* Long mode consistency checks. */
3481 if ( (uNewCrX & X86_CR0_PG)
3482 && !(uOldCrX & X86_CR0_PG)
3483 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3484 {
3485 if (!(pCtx->cr4 & X86_CR4_PAE))
3486 {
3487 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
3488 return iemRaiseGeneralProtectionFault0(pIemCpu);
3489 }
3490 if (pCtx->cs.Attr.n.u1Long)
3491 {
3492 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
3493 return iemRaiseGeneralProtectionFault0(pIemCpu);
3494 }
3495 }
3496
3497 /** @todo check reserved PDPTR bits as AMD states. */
3498
3499 /*
3500 * Change CR0.
3501 */
3502 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3503 CPUMSetGuestCR0(pVCpu, uNewCrX);
3504 else
3505 pCtx->cr0 = uNewCrX;
3506 Assert(pCtx->cr0 == uNewCrX);
3507
3508 /*
3509 * Change EFER.LMA if entering or leaving long mode.
3510 */
3511 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3512 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3513 {
3514 uint64_t NewEFER = pCtx->msrEFER;
3515 if (uNewCrX & X86_CR0_PG)
3516 NewEFER |= MSR_K6_EFER_LMA;
3517 else
3518 NewEFER &= ~MSR_K6_EFER_LMA;
3519
3520 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3521 CPUMSetGuestEFER(pVCpu, NewEFER);
3522 else
3523 pCtx->msrEFER = NewEFER;
3524 Assert(pCtx->msrEFER == NewEFER);
3525 }
3526
3527 /*
3528 * Inform PGM.
3529 */
3530 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3531 {
3532 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3533 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3534 {
3535 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3536 AssertRCReturn(rc, rc);
3537 /* ignore informational status codes */
3538 }
3539 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3540 }
3541 else
3542 rcStrict = VINF_SUCCESS;
3543
3544#ifdef IN_RC
3545 /* Return to ring-3 for rescheduling if WP or AM changes. */
3546 if ( rcStrict == VINF_SUCCESS
3547 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
3548 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
3549 rcStrict = VINF_EM_RESCHEDULE;
3550#endif
3551 break;
3552 }
3553
3554 /*
3555 * CR2 can be changed without any restrictions.
3556 */
3557 case 2:
3558 pCtx->cr2 = uNewCrX;
3559 rcStrict = VINF_SUCCESS;
3560 break;
3561
3562 /*
3563 * CR3 is relatively simple, although AMD and Intel have different
3564 * accounts of how setting reserved bits is handled. We take Intel's
3565 * word for the lower bits and AMD's for the high bits (63:52).
3566 */
3567 /** @todo Testcase: Setting reserved bits in CR3, especially before
3568 * enabling paging. */
3569 case 3:
3570 {
3571 /* check / mask the value. */
3572 if (uNewCrX & UINT64_C(0xfff0000000000000))
3573 {
3574 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3575 return iemRaiseGeneralProtectionFault0(pIemCpu);
3576 }
3577
3578 uint64_t fValid;
3579 if ( (pCtx->cr4 & X86_CR4_PAE)
3580 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3581 fValid = UINT64_C(0x000ffffffffff014);
3582 else if (pCtx->cr4 & X86_CR4_PAE)
3583 fValid = UINT64_C(0xfffffff4);
3584 else
3585 fValid = UINT64_C(0xfffff014);
3586 if (uNewCrX & ~fValid)
3587 {
3588 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3589 uNewCrX, uNewCrX & ~fValid));
3590 uNewCrX &= fValid;
3591 }
3592
3593 /** @todo If we're in PAE mode we should check the PDPTRs for
3594 * invalid bits. */
3595
3596 /* Make the change. */
3597 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3598 {
3599 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3600 AssertRCSuccessReturn(rc, rc);
3601 }
3602 else
3603 pCtx->cr3 = uNewCrX;
3604
3605 /* Inform PGM. */
3606 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3607 {
3608 if (pCtx->cr0 & X86_CR0_PG)
3609 {
3610 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3611 AssertRCReturn(rc, rc);
3612 /* ignore informational status codes */
3613 }
3614 }
3615 rcStrict = VINF_SUCCESS;
3616 break;
3617 }
3618
3619 /*
3620 * CR4 is a bit more tedious as there are bits which cannot be cleared
3621 * under some circumstances and such.
3622 */
3623 case 4:
3624 {
3625 uint64_t const uOldCrX = pCtx->cr4;
3626
3627 /* reserved bits */
3628 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3629 | X86_CR4_TSD | X86_CR4_DE
3630 | X86_CR4_PSE | X86_CR4_PAE
3631 | X86_CR4_MCE | X86_CR4_PGE
3632 | X86_CR4_PCE | X86_CR4_OSFSXR
3633 | X86_CR4_OSXMMEEXCPT;
3634 //if (xxx)
3635 // fValid |= X86_CR4_VMXE;
3636 //if (xxx)
3637 // fValid |= X86_CR4_OSXSAVE;
3638 if (uNewCrX & ~(uint64_t)fValid)
3639 {
3640 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3641 return iemRaiseGeneralProtectionFault0(pIemCpu);
3642 }
3643
3644 /* long mode checks. */
3645 if ( (uOldCrX & X86_CR4_PAE)
3646 && !(uNewCrX & X86_CR4_PAE)
3647 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3648 {
3649 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3650 return iemRaiseGeneralProtectionFault0(pIemCpu);
3651 }
3652
3653
3654 /*
3655 * Change it.
3656 */
3657 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3658 {
3659 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3660 AssertRCSuccessReturn(rc, rc);
3661 }
3662 else
3663 pCtx->cr4 = uNewCrX;
3664 Assert(pCtx->cr4 == uNewCrX);
3665
3666 /*
3667 * Notify SELM and PGM.
3668 */
3669 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3670 {
3671 /* SELM - VME may change things wrt the TSS shadowing. */
3672 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3673 {
3674 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3675 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3676#ifdef VBOX_WITH_RAW_MODE
3677 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
3678 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3679#endif
3680 }
3681
3682 /* PGM - flushing and mode. */
3683 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3684 {
3685 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3686 AssertRCReturn(rc, rc);
3687 /* ignore informational status codes */
3688 }
3689 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3690 }
3691 else
3692 rcStrict = VINF_SUCCESS;
3693 break;
3694 }
3695
3696 /*
3697 * CR8 maps to the APIC TPR.
3698 */
3699 case 8:
3700 if (uNewCrX & ~(uint64_t)0xf)
3701 {
3702 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
3703 return iemRaiseGeneralProtectionFault0(pIemCpu);
3704 }
3705
3706 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3707 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
3708 rcStrict = VINF_SUCCESS;
3709 break;
3710
3711 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3712 }
3713
3714 /*
3715 * Advance the RIP on success.
3716 */
3717 if (RT_SUCCESS(rcStrict))
3718 {
3719 if (rcStrict != VINF_SUCCESS)
3720 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3721 iemRegAddToRip(pIemCpu, cbInstr);
3722 }
3723
3724 return rcStrict;
3725}
3726
3727
3728/**
3729 * Implements mov CRx,GReg.
3730 *
3731 * @param iCrReg The CRx register to write (valid).
3732 * @param iGReg The general register to load the CRx value from.
3733 */
3734IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3735{
3736 if (pIemCpu->uCpl != 0)
3737 return iemRaiseGeneralProtectionFault0(pIemCpu);
3738 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3739
3740 /*
3741 * Read the new value from the source register and call common worker.
3742 */
3743 uint64_t uNewCrX;
3744 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3745 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3746 else
3747 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3748 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3749}
3750
3751
3752/**
3753 * Implements 'LMSW r/m16'
3754 *
3755 * @param u16NewMsw The new value.
3756 */
3757IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3758{
3759 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3760
3761 if (pIemCpu->uCpl != 0)
3762 return iemRaiseGeneralProtectionFault0(pIemCpu);
3763 Assert(!pCtx->eflags.Bits.u1VM);
3764
3765 /*
3766 * Compose the new CR0 value and call common worker.
3767 */
3768 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3769 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3770 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3771}
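/* LMSW only feeds the low four CR0 bits (PE, MP, EM, TS) into iemCImpl_load_CrX, and since PE
 * is not in the cleared mask it can be set but never cleared this way. A minimal standalone
 * sketch of that composition (plain C, local MY_CR0_* constants instead of the VBox headers,
 * kept under #if 0 so it is not built): */
#if 0
#include <stdint.h>
#include <assert.h>

#define MY_CR0_PE   UINT64_C(0x1)
#define MY_CR0_MP   UINT64_C(0x2)
#define MY_CR0_EM   UINT64_C(0x4)
#define MY_CR0_TS   UINT64_C(0x8)

static uint64_t ComposeCr0FromMsw(uint64_t uOldCr0, uint16_t u16Msw)
{
    uint64_t uNewCr0 = uOldCr0 & ~(MY_CR0_MP | MY_CR0_EM | MY_CR0_TS);  /* PE deliberately kept */
    return uNewCr0 | (u16Msw & (MY_CR0_PE | MY_CR0_MP | MY_CR0_EM | MY_CR0_TS));
}

int main(void)
{
    assert(ComposeCr0FromMsw(MY_CR0_PE | MY_CR0_TS, 0) == MY_CR0_PE);               /* cannot clear PE */
    assert(ComposeCr0FromMsw(0, MY_CR0_PE | MY_CR0_TS) == (MY_CR0_PE | MY_CR0_TS)); /* can set PE/TS   */
    return 0;
}
#endif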
3772
3773
3774/**
3775 * Implements 'CLTS'.
3776 */
3777IEM_CIMPL_DEF_0(iemCImpl_clts)
3778{
3779 if (pIemCpu->uCpl != 0)
3780 return iemRaiseGeneralProtectionFault0(pIemCpu);
3781
3782 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3783 uint64_t uNewCr0 = pCtx->cr0;
3784 uNewCr0 &= ~X86_CR0_TS;
3785 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3786}
3787
3788
3789/**
3790 * Implements mov GReg,DRx.
3791 *
3792 * @param iGReg The general register to store the DRx value in.
3793 * @param iDrReg The DRx register to read (0-7).
3794 */
3795IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3796{
3797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3798
3799 /*
3800 * Check preconditions.
3801 */
3802
3803 /* Raise GPs. */
3804 if (pIemCpu->uCpl != 0)
3805 return iemRaiseGeneralProtectionFault0(pIemCpu);
3806 Assert(!pCtx->eflags.Bits.u1VM);
3807
3808 if ( (iDrReg == 4 || iDrReg == 5)
3809 && (pCtx->cr4 & X86_CR4_DE) )
3810 {
3811 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3812 return iemRaiseGeneralProtectionFault0(pIemCpu);
3813 }
3814
3815 /* Raise #DB if general access detect is enabled. */
3816 if (pCtx->dr[7] & X86_DR7_GD)
3817 {
3818 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3819 return iemRaiseDebugException(pIemCpu);
3820 }
3821
3822 /*
3823 * Read the debug register and store it in the specified general register.
3824 */
3825 uint64_t drX;
3826 switch (iDrReg)
3827 {
3828 case 0: drX = pCtx->dr[0]; break;
3829 case 1: drX = pCtx->dr[1]; break;
3830 case 2: drX = pCtx->dr[2]; break;
3831 case 3: drX = pCtx->dr[3]; break;
3832 case 6:
3833 case 4:
3834 drX = pCtx->dr[6];
3835 drX &= ~RT_BIT_32(12);
3836 drX |= UINT32_C(0xffff0ff0);
3837 break;
3838 case 7:
3839 case 5:
3840 drX = pCtx->dr[7];
3841 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3842 drX |= RT_BIT_32(10);
3843 break;
3844 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3845 }
3846
3847 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3848 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3849 else
3850 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3851
3852 iemRegAddToRip(pIemCpu, cbInstr);
3853 return VINF_SUCCESS;
3854}
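/* The switch above canonicalizes DR6/DR7 on reads: DR6 reads with its reserved bits 31:16 and
 * 11:4 set and bit 12 clear, DR7 reads with bit 10 set and bits 11, 12, 14 and 15 clear. A
 * minimal standalone sketch mirroring those fixed bits, kept under #if 0 so it is not built: */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t CanonicalizeDr6(uint64_t uDr6)
{
    return (uDr6 & ~(UINT64_C(1) << 12)) | UINT32_C(0xffff0ff0);
}

static uint64_t CanonicalizeDr7(uint64_t uDr7)
{
    uDr7 &= ~((UINT64_C(1) << 11) | (UINT64_C(1) << 12) | (UINT64_C(1) << 14) | (UINT64_C(1) << 15));
    return uDr7 | (UINT64_C(1) << 10);
}

int main(void)
{
    assert(CanonicalizeDr6(0) == UINT32_C(0xffff0ff0));     /* reserved DR6 bits read as fixed values */
    assert(CanonicalizeDr7(0) == (UINT64_C(1) << 10));      /* DR7 bit 10 always reads as 1 */
    return 0;
}
#endif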
3855
3856
3857/**
3858 * Implements mov DRx,GReg.
3859 *
3860 * @param iDrReg The DRx register to write (valid).
3861 * @param iGReg The general register to load the DRx value from.
3862 */
3863IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3864{
3865 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3866
3867 /*
3868 * Check preconditions.
3869 */
3870 if (pIemCpu->uCpl != 0)
3871 return iemRaiseGeneralProtectionFault0(pIemCpu);
3872 Assert(!pCtx->eflags.Bits.u1VM);
3873
3874 if ( (iDrReg == 4 || iDrReg == 5)
3875 && (pCtx->cr4 & X86_CR4_DE) )
3876 {
3877 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3878 return iemRaiseGeneralProtectionFault0(pIemCpu);
3879 }
3880
3881 /* Raise #DB if general access detect is enabled. */
3882 /** @todo Is the \#DB for DR7.GD raised before the \#GP for reserved high bits
3883 * in DR7/DR6? */
3884 if (pCtx->dr[7] & X86_DR7_GD)
3885 {
3886 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3887 return iemRaiseDebugException(pIemCpu);
3888 }
3889
3890 /*
3891 * Read the new value from the source register.
3892 */
3893 uint64_t uNewDrX;
3894 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3895 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3896 else
3897 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3898
3899 /*
3900 * Adjust it.
3901 */
3902 switch (iDrReg)
3903 {
3904 case 0:
3905 case 1:
3906 case 2:
3907 case 3:
3908 /* nothing to adjust */
3909 break;
3910
3911 case 6:
3912 case 4:
3913 if (uNewDrX & UINT64_C(0xffffffff00000000))
3914 {
3915 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3916 return iemRaiseGeneralProtectionFault0(pIemCpu);
3917 }
3918 uNewDrX &= ~RT_BIT_32(12);
3919 uNewDrX |= UINT32_C(0xffff0ff0);
3920 break;
3921
3922 case 7:
3923 case 5:
3924 if (uNewDrX & UINT64_C(0xffffffff00000000))
3925 {
3926 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3927 return iemRaiseGeneralProtectionFault0(pIemCpu);
3928 }
3929 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3930 uNewDrX |= RT_BIT_32(10);
3931 break;
3932
3933 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3934 }
3935
3936 /*
3937 * Do the actual setting.
3938 */
3939 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3940 {
3941 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3942 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3943 }
3944 else
3945 pCtx->dr[iDrReg] = uNewDrX;
3946
3947 iemRegAddToRip(pIemCpu, cbInstr);
3948 return VINF_SUCCESS;
3949}
3950
3951
3952/**
3953 * Implements 'INVLPG m'.
3954 *
3955 * @param GCPtrPage The effective address of the page to invalidate.
3956 * @remarks Updates the RIP.
3957 */
3958 IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3959{
3960 /* ring-0 only. */
3961 if (pIemCpu->uCpl != 0)
3962 return iemRaiseGeneralProtectionFault0(pIemCpu);
3963 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3964
3965 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3966 iemRegAddToRip(pIemCpu, cbInstr);
3967
3968 if (rc == VINF_SUCCESS)
3969 return VINF_SUCCESS;
3970 if (rc == VINF_PGM_SYNC_CR3)
3971 return iemSetPassUpStatus(pIemCpu, rc);
3972
3973 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3974 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3975 return rc;
3976}
3977
3978
3979/**
3980 * Implements RDTSC.
3981 */
3982IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3983{
3984 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3985
3986 /*
3987 * Check preconditions.
3988 */
3989 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3990 return iemRaiseUndefinedOpcode(pIemCpu);
3991
3992 if ( (pCtx->cr4 & X86_CR4_TSD)
3993 && pIemCpu->uCpl != 0)
3994 {
3995 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3996 return iemRaiseGeneralProtectionFault0(pIemCpu);
3997 }
3998
3999 /*
4000 * Do the job.
4001 */
4002 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
4003 pCtx->rax = (uint32_t)uTicks;
4004 pCtx->rdx = uTicks >> 32;
4005#ifdef IEM_VERIFICATION_MODE_FULL
4006 pIemCpu->fIgnoreRaxRdx = true;
4007#endif
4008
4009 iemRegAddToRip(pIemCpu, cbInstr);
4010 return VINF_SUCCESS;
4011}
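/* RDTSC returns the 64-bit tick count split across EDX:EAX, and in 64-bit mode the upper halves
 * of RAX/RDX end up zero because only 32-bit values are assigned. A minimal standalone sketch of
 * that split (kept under #if 0 so it is not built): */
#if 0
#include <stdint.h>
#include <assert.h>

static void SplitTsc(uint64_t uTicks, uint64_t *puRax, uint64_t *puRdx)
{
    *puRax = (uint32_t)uTicks;      /* EAX = low 32 bits, upper RAX bits cleared */
    *puRdx = uTicks >> 32;          /* EDX = high 32 bits, upper RDX bits cleared */
}

int main(void)
{
    uint64_t uRax, uRdx;
    SplitTsc(UINT64_C(0x0123456789abcdef), &uRax, &uRdx);
    assert(uRax == UINT64_C(0x89abcdef));
    assert(uRdx == UINT64_C(0x01234567));
    return 0;
}
#endif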
4012
4013
4014/**
4015 * Implements RDMSR.
4016 */
4017IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
4018{
4019 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4020
4021 /*
4022 * Check preconditions.
4023 */
4024 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
4025 return iemRaiseUndefinedOpcode(pIemCpu);
4026 if (pIemCpu->uCpl != 0)
4027 return iemRaiseGeneralProtectionFault0(pIemCpu);
4028
4029 /*
4030 * Do the job.
4031 */
4032 RTUINT64U uValue;
4033 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
4034 if (rc != VINF_SUCCESS)
4035 {
4036 Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx));
4037 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
4038 return iemRaiseGeneralProtectionFault0(pIemCpu);
4039 }
4040
4041 pCtx->rax = uValue.s.Lo;
4042 pCtx->rdx = uValue.s.Hi;
4043
4044 iemRegAddToRip(pIemCpu, cbInstr);
4045 return VINF_SUCCESS;
4046}
4047
4048
4049/**
4050 * Implements WRMSR.
4051 */
4052IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
4053{
4054 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4055
4056 /*
4057 * Check preconditions.
4058 */
4059 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
4060 return iemRaiseUndefinedOpcode(pIemCpu);
4061 if (pIemCpu->uCpl != 0)
4062 return iemRaiseGeneralProtectionFault0(pIemCpu);
4063
4064 /*
4065 * Do the job.
4066 */
4067 RTUINT64U uValue;
4068 uValue.s.Lo = pCtx->eax;
4069 uValue.s.Hi = pCtx->edx;
4070
4071 int rc;
4072 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4073 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
4074 else
4075 {
4076 CPUMCTX CtxTmp = *pCtx;
4077 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
4078 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
4079 *pCtx = *pCtx2;
4080 *pCtx2 = CtxTmp;
4081 }
4082 if (rc != VINF_SUCCESS)
4083 {
4084 Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
4085 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
4086 return iemRaiseGeneralProtectionFault0(pIemCpu);
4087 }
4088
4089 iemRegAddToRip(pIemCpu, cbInstr);
4090 return VINF_SUCCESS;
4091}
4092
4093
4094/**
4095 * Implements 'IN eAX, port'.
4096 *
4097 * @param u16Port The source port.
4098 * @param cbReg The register size.
4099 */
4100IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
4101{
4102 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4103
4104 /*
4105 * CPL check
4106 */
4107 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
4108 if (rcStrict != VINF_SUCCESS)
4109 return rcStrict;
4110
4111 /*
4112 * Perform the I/O.
4113 */
4114 uint32_t u32Value;
4115 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4116 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
4117 else
4118 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
4119 if (IOM_SUCCESS(rcStrict))
4120 {
4121 switch (cbReg)
4122 {
4123 case 1: pCtx->al = (uint8_t)u32Value; break;
4124 case 2: pCtx->ax = (uint16_t)u32Value; break;
4125 case 4: pCtx->rax = u32Value; break;
4126 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4127 }
4128 iemRegAddToRip(pIemCpu, cbInstr);
4129 pIemCpu->cPotentialExits++;
4130 if (rcStrict != VINF_SUCCESS)
4131 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
4132 }
4133
4134 return rcStrict;
4135}
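/* The size switch above merges the port value into the accumulator the same way a mov to
 * AL/AX/EAX would: byte and word reads leave the upper register bits alone, while a dword read
 * zero-extends into RAX. A minimal standalone sketch of that merge (kept under #if 0 so it is
 * not built): */
#if 0
#include <stdint.h>
#include <assert.h>

static uint64_t MergeInResult(uint64_t uRax, uint32_t u32Value, unsigned cbReg)
{
    switch (cbReg)
    {
        case 1:  return (uRax & ~UINT64_C(0xff))   | (uint8_t)u32Value;     /* AL,  rest preserved */
        case 2:  return (uRax & ~UINT64_C(0xffff)) | (uint16_t)u32Value;    /* AX,  rest preserved */
        case 4:  return u32Value;                                           /* EAX, zero-extended  */
        default: return uRax;
    }
}

int main(void)
{
    assert(MergeInResult(UINT64_C(0x1122334455667788), 0xabcd, 2) == UINT64_C(0x112233445566abcd));
    assert(MergeInResult(UINT64_C(0x1122334455667788), 0xabcd, 4) == UINT64_C(0x000000000000abcd));
    return 0;
}
#endif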
4136
4137
4138/**
4139 * Implements 'IN eAX, DX'.
4140 *
4141 * @param cbReg The register size.
4142 */
4143IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
4144{
4145 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
4146}
4147
4148
4149/**
4150 * Implements 'OUT port, eAX'.
4151 *
4152 * @param u16Port The destination port.
4153 * @param cbReg The register size.
4154 */
4155IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
4156{
4157 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4158
4159 /*
4160 * CPL check
4161 */
4162 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
4163 if (rcStrict != VINF_SUCCESS)
4164 return rcStrict;
4165
4166 /*
4167 * Perform the I/O.
4168 */
4169 uint32_t u32Value;
4170 switch (cbReg)
4171 {
4172 case 1: u32Value = pCtx->al; break;
4173 case 2: u32Value = pCtx->ax; break;
4174 case 4: u32Value = pCtx->eax; break;
4175 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
4176 }
4177 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4178 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
4179 else
4180 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
4181 if (IOM_SUCCESS(rcStrict))
4182 {
4183 iemRegAddToRip(pIemCpu, cbInstr);
4184 pIemCpu->cPotentialExits++;
4185 if (rcStrict != VINF_SUCCESS)
4186 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
4187 }
4188 return rcStrict;
4189}
4190
4191
4192/**
4193 * Implements 'OUT DX, eAX'.
4194 *
4195 * @param cbReg The register size.
4196 */
4197IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
4198{
4199 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
4200}
4201
4202
4203/**
4204 * Implements 'CLI'.
4205 */
4206IEM_CIMPL_DEF_0(iemCImpl_cli)
4207{
4208 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4209 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4210 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4211 uint32_t const fEflOld = fEfl;
4212 if (pCtx->cr0 & X86_CR0_PE)
4213 {
4214 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4215 if (!(fEfl & X86_EFL_VM))
4216 {
4217 if (pIemCpu->uCpl <= uIopl)
4218 fEfl &= ~X86_EFL_IF;
4219 else if ( pIemCpu->uCpl == 3
4220 && (pCtx->cr4 & X86_CR4_PVI) )
4221 fEfl &= ~X86_EFL_VIF;
4222 else
4223 return iemRaiseGeneralProtectionFault0(pIemCpu);
4224 }
4225 /* V8086 */
4226 else if (uIopl == 3)
4227 fEfl &= ~X86_EFL_IF;
4228 else if ( uIopl < 3
4229 && (pCtx->cr4 & X86_CR4_VME) )
4230 fEfl &= ~X86_EFL_VIF;
4231 else
4232 return iemRaiseGeneralProtectionFault0(pIemCpu);
4233 }
4234 /* real mode */
4235 else
4236 fEfl &= ~X86_EFL_IF;
4237
4238 /* Commit. */
4239 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4240 iemRegAddToRip(pIemCpu, cbInstr);
4241 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
4242 return VINF_SUCCESS;
4243}
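/* The CLI privilege rules above (and the mirrored STI rules below) boil down to a small decision:
 * in protected mode IF may be cleared when CPL <= IOPL, otherwise ring-3 with CR4.PVI only gets to
 * clear VIF; in V8086 mode IOPL=3 clears IF and CR4.VME clears VIF; everything else is #GP(0), and
 * real mode always clears IF. A minimal standalone sketch of that decision (kept under #if 0 so it
 * is not built): */
#if 0
#include <stdbool.h>
#include <assert.h>

typedef enum { CLI_CLEAR_IF, CLI_CLEAR_VIF, CLI_RAISE_GP } CLIACTION;

static CLIACTION DecideCli(bool fProtMode, bool fV86, unsigned uCpl, unsigned uIopl, bool fPvi, bool fVme)
{
    if (!fProtMode)
        return CLI_CLEAR_IF;                            /* real mode */
    if (!fV86)
    {
        if (uCpl <= uIopl)          return CLI_CLEAR_IF;
        if (uCpl == 3 && fPvi)      return CLI_CLEAR_VIF;
        return CLI_RAISE_GP;
    }
    if (uIopl == 3)                 return CLI_CLEAR_IF; /* V8086 */
    if (uIopl < 3 && fVme)          return CLI_CLEAR_VIF;
    return CLI_RAISE_GP;
}

int main(void)
{
    assert(DecideCli(true,  false, 3, 0, false, false) == CLI_RAISE_GP);
    assert(DecideCli(true,  false, 3, 0, true,  false) == CLI_CLEAR_VIF);
    assert(DecideCli(true,  true,  3, 3, false, false) == CLI_CLEAR_IF);
    assert(DecideCli(false, false, 0, 0, false, false) == CLI_CLEAR_IF);
    return 0;
}
#endif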
4244
4245
4246/**
4247 * Implements 'STI'.
4248 */
4249IEM_CIMPL_DEF_0(iemCImpl_sti)
4250{
4251 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4252 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4253 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4254 uint32_t const fEflOld = fEfl;
4255
4256 if (pCtx->cr0 & X86_CR0_PE)
4257 {
4258 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4259 if (!(fEfl & X86_EFL_VM))
4260 {
4261 if (pIemCpu->uCpl <= uIopl)
4262 fEfl |= X86_EFL_IF;
4263 else if ( pIemCpu->uCpl == 3
4264 && (pCtx->cr4 & X86_CR4_PVI)
4265 && !(fEfl & X86_EFL_VIP) )
4266 fEfl |= X86_EFL_VIF;
4267 else
4268 return iemRaiseGeneralProtectionFault0(pIemCpu);
4269 }
4270 /* V8086 */
4271 else if (uIopl == 3)
4272 fEfl |= X86_EFL_IF;
4273 else if ( uIopl < 3
4274 && (pCtx->cr4 & X86_CR4_VME)
4275 && !(fEfl & X86_EFL_VIP) )
4276 fEfl |= X86_EFL_VIF;
4277 else
4278 return iemRaiseGeneralProtectionFault0(pIemCpu);
4279 }
4280 /* real mode */
4281 else
4282 fEfl |= X86_EFL_IF;
4283
4284 /* Commit. */
4285 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4286 iemRegAddToRip(pIemCpu, cbInstr);
4287 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_VERIFICATION_ENABLED(pIemCpu))
4288 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4289 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
4290 return VINF_SUCCESS;
4291}
4292
4293
4294/**
4295 * Implements 'HLT'.
4296 */
4297IEM_CIMPL_DEF_0(iemCImpl_hlt)
4298{
4299 if (pIemCpu->uCpl != 0)
4300 return iemRaiseGeneralProtectionFault0(pIemCpu);
4301 iemRegAddToRip(pIemCpu, cbInstr);
4302 return VINF_EM_HALT;
4303}
4304
4305
4306/**
4307 * Implements 'CPUID'.
4308 */
4309IEM_CIMPL_DEF_0(iemCImpl_cpuid)
4310{
4311 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4312
4313 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
4314 pCtx->rax &= UINT32_C(0xffffffff);
4315 pCtx->rbx &= UINT32_C(0xffffffff);
4316 pCtx->rcx &= UINT32_C(0xffffffff);
4317 pCtx->rdx &= UINT32_C(0xffffffff);
4318
4319 iemRegAddToRip(pIemCpu, cbInstr);
4320 return VINF_SUCCESS;
4321}
4322
4323
4324/**
4325 * Implements 'AAD'.
4326 *
4327 * @param bImm The immediate operand (the base, normally 10).
4328 */
4329IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
4330{
4331 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4332
4333 uint16_t const ax = pCtx->ax;
4334 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
4335 pCtx->ax = al;
4336 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4337 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4338 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4339
4340 iemRegAddToRip(pIemCpu, cbInstr);
4341 return VINF_SUCCESS;
4342}
4343
4344
4345/**
4346 * Implements 'AAM'.
4347 *
4348 * @param bImm The immediate operand. Cannot be 0.
4349 */
4350IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
4351{
4352 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4353 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
4354
4355 uint16_t const ax = pCtx->ax;
4356 uint8_t const al = (uint8_t)ax % bImm;
4357 uint8_t const ah = (uint8_t)ax / bImm;
4358 pCtx->ax = (ah << 8) + al;
4359 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4360 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4361 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4362
4363 iemRegAddToRip(pIemCpu, cbInstr);
4364 return VINF_SUCCESS;
4365}
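/* AAD and AAM above are plain base conversions on AL/AH: AAD folds AH back into AL (AL = AL + AH*base,
 * AH = 0) and AAM splits AL (AH = AL / base, AL = AL % base), with the base normally 10. A minimal
 * standalone sketch with a worked example (kept under #if 0 so it is not built): */
#if 0
#include <stdint.h>
#include <assert.h>

static uint16_t DoAad(uint16_t uAx, uint8_t bBase)
{
    uint8_t const al = (uint8_t)((uint8_t)uAx + (uint8_t)(uAx >> 8) * bBase);
    return al;                                          /* AH becomes zero */
}

static uint16_t DoAam(uint16_t uAx, uint8_t bBase)
{
    uint8_t const al = (uint8_t)((uint8_t)uAx % bBase);
    uint8_t const ah = (uint8_t)((uint8_t)uAx / bBase);
    return (uint16_t)((ah << 8) | al);
}

int main(void)
{
    assert(DoAad(0x0205, 10) == 0x0019);    /* unpacked BCD "25" -> binary 25 */
    assert(DoAam(0x0019, 10) == 0x0205);    /* binary 25 -> AH=2, AL=5 */
    return 0;
}
#endif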
4366
4367
4368
4369
4370/*
4371 * Instantiate the various string operation combinations.
4372 */
4373#define OP_SIZE 8
4374#define ADDR_SIZE 16
4375#include "IEMAllCImplStrInstr.cpp.h"
4376#define OP_SIZE 8
4377#define ADDR_SIZE 32
4378#include "IEMAllCImplStrInstr.cpp.h"
4379#define OP_SIZE 8
4380#define ADDR_SIZE 64
4381#include "IEMAllCImplStrInstr.cpp.h"
4382
4383#define OP_SIZE 16
4384#define ADDR_SIZE 16
4385#include "IEMAllCImplStrInstr.cpp.h"
4386#define OP_SIZE 16
4387#define ADDR_SIZE 32
4388#include "IEMAllCImplStrInstr.cpp.h"
4389#define OP_SIZE 16
4390#define ADDR_SIZE 64
4391#include "IEMAllCImplStrInstr.cpp.h"
4392
4393#define OP_SIZE 32
4394#define ADDR_SIZE 16
4395#include "IEMAllCImplStrInstr.cpp.h"
4396#define OP_SIZE 32
4397#define ADDR_SIZE 32
4398#include "IEMAllCImplStrInstr.cpp.h"
4399#define OP_SIZE 32
4400#define ADDR_SIZE 64
4401#include "IEMAllCImplStrInstr.cpp.h"
4402
4403#define OP_SIZE 64
4404#define ADDR_SIZE 32
4405#include "IEMAllCImplStrInstr.cpp.h"
4406#define OP_SIZE 64
4407#define ADDR_SIZE 64
4408#include "IEMAllCImplStrInstr.cpp.h"
4409
4410
4411/**
4412 * Implements 'FINIT' and 'FNINIT'.
4413 *
4414 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4415 * not.
4416 */
4417IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4418{
4419 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4420
4421 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4422 return iemRaiseDeviceNotAvailable(pIemCpu);
4423
4424 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4425 if (fCheckXcpts && TODO )
4426 return iemRaiseMathFault(pIemCpu);
4427 */
4428
4429 if (iemFRegIsFxSaveFormat(pIemCpu))
4430 {
4431 pCtx->fpu.FCW = 0x37f;
4432 pCtx->fpu.FSW = 0;
4433 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4434 pCtx->fpu.FPUDP = 0;
4435 pCtx->fpu.DS = 0; //??
4436 pCtx->fpu.Rsrvd2= 0;
4437 pCtx->fpu.FPUIP = 0;
4438 pCtx->fpu.CS = 0; //??
4439 pCtx->fpu.Rsrvd1= 0;
4440 pCtx->fpu.FOP = 0;
4441 }
4442 else
4443 {
4444 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4445 pFpu->FCW = 0x37f;
4446 pFpu->FSW = 0;
4447 pFpu->FTW = 0xffff; /* 11 - empty */
4448 pFpu->FPUOO = 0; //??
4449 pFpu->FPUOS = 0; //??
4450 pFpu->FPUIP = 0;
4451 pFpu->CS = 0; //??
4452 pFpu->FOP = 0;
4453 }
4454
4455 iemHlpUsedFpu(pIemCpu);
4456 iemRegAddToRip(pIemCpu, cbInstr);
4457 return VINF_SUCCESS;
4458}
4459
4460
4461/**
4462 * Implements 'FXSAVE'.
4463 *
4464 * @param iEffSeg The effective segment.
4465 * @param GCPtrEff The address of the image.
4466 * @param enmEffOpSize The operand size (only REX.W really matters).
4467 */
4468IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4469{
4470 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4471
4472 /*
4473 * Raise exceptions.
4474 */
4475 if (pCtx->cr0 & X86_CR0_EM)
4476 return iemRaiseUndefinedOpcode(pIemCpu);
4477 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4478 return iemRaiseDeviceNotAvailable(pIemCpu);
4479 if (GCPtrEff & 15)
4480 {
4481 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4482 * all/any misalignment sizes; Intel says it's an implementation detail. */
4483 if ( (pCtx->cr0 & X86_CR0_AM)
4484 && pCtx->eflags.Bits.u1AC
4485 && pIemCpu->uCpl == 3)
4486 return iemRaiseAlignmentCheckException(pIemCpu);
4487 return iemRaiseGeneralProtectionFault0(pIemCpu);
4488 }
4489 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4490
4491 /*
4492 * Access the memory.
4493 */
4494 void *pvMem512;
4495 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4496 if (rcStrict != VINF_SUCCESS)
4497 return rcStrict;
4498 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4499
4500 /*
4501 * Store the registers.
4502 */
4503 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
4504 * specific whether MXCSR and XMM0-XMM7 are saved. */
4505
4506 /* common for all formats */
4507 pDst->FCW = pCtx->fpu.FCW;
4508 pDst->FSW = pCtx->fpu.FSW;
4509 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
4510 pDst->FOP = pCtx->fpu.FOP;
4511 pDst->MXCSR = pCtx->fpu.MXCSR;
4512 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4513 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4514 {
4515 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4516 * them for now... */
4517 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4518 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4519 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4520 pDst->aRegs[i].au32[3] = 0;
4521 }
4522
4523 /* FPU IP, CS, DP and DS. */
4524 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4525 * state information. :-/
4526 * Storing zeros now to prevent any potential leakage of host info. */
4527 pDst->FPUIP = 0;
4528 pDst->CS = 0;
4529 pDst->Rsrvd1 = 0;
4530 pDst->FPUDP = 0;
4531 pDst->DS = 0;
4532 pDst->Rsrvd2 = 0;
4533
4534 /* XMM registers. */
4535 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4536 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4537 || pIemCpu->uCpl != 0)
4538 {
4539 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4540 for (uint32_t i = 0; i < cXmmRegs; i++)
4541 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4542 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4543 * right? */
4544 }
4545
4546 /*
4547 * Commit the memory.
4548 */
4549 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4550 if (rcStrict != VINF_SUCCESS)
4551 return rcStrict;
4552
4553 iemRegAddToRip(pIemCpu, cbInstr);
4554 return VINF_SUCCESS;
4555}
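/* FXSAVE/FXRSTOR demand a 16-byte aligned image; as implemented above a misaligned address yields
 * #AC only when alignment checking is fully armed (CR0.AM, EFLAGS.AC and CPL=3), otherwise #GP(0).
 * A minimal standalone sketch of that fault selection (kept under #if 0 so it is not built): */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

typedef enum { FXFAULT_NONE, FXFAULT_AC, FXFAULT_GP } FXFAULT;

static FXFAULT CheckFxAlignment(uint64_t GCPtrEff, bool fCr0Am, bool fEflAc, unsigned uCpl)
{
    if (!(GCPtrEff & 15))
        return FXFAULT_NONE;
    if (fCr0Am && fEflAc && uCpl == 3)
        return FXFAULT_AC;                  /* alignment check armed in ring-3 */
    return FXFAULT_GP;                      /* otherwise a plain #GP(0) */
}

int main(void)
{
    assert(CheckFxAlignment(0x1000, true, true, 3) == FXFAULT_NONE);
    assert(CheckFxAlignment(0x1008, true, true, 3) == FXFAULT_AC);
    assert(CheckFxAlignment(0x1008, true, true, 0) == FXFAULT_GP);
    return 0;
}
#endif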
4556
4557
4558/**
4559 * Implements 'FXRSTOR'.
4560 *
 * @param iEffSeg The effective segment register for @a GCPtrEff.
4561 * @param GCPtrEff The address of the image.
4562 * @param enmEffOpSize The operand size (only REX.W really matters).
4563 */
4564IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4565{
4566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4567
4568 /*
4569 * Raise exceptions.
4570 */
4571 if (pCtx->cr0 & X86_CR0_EM)
4572 return iemRaiseUndefinedOpcode(pIemCpu);
4573 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4574 return iemRaiseDeviceNotAvailable(pIemCpu);
4575 if (GCPtrEff & 15)
4576 {
4577 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4578 * all/any misalignment sizes; Intel says it's an implementation detail. */
4579 if ( (pCtx->cr0 & X86_CR0_AM)
4580 && pCtx->eflags.Bits.u1AC
4581 && pIemCpu->uCpl == 3)
4582 return iemRaiseAlignmentCheckException(pIemCpu);
4583 return iemRaiseGeneralProtectionFault0(pIemCpu);
4584 }
4585 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4586
4587 /*
4588 * Access the memory.
4589 */
4590 void *pvMem512;
4591 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4592 if (rcStrict != VINF_SUCCESS)
4593 return rcStrict;
4594 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4595
4596 /*
4597 * Check the state for stuff which will GP(0).
4598 */
4599 uint32_t const fMXCSR = pSrc->MXCSR;
4600 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4601 if (fMXCSR & ~fMXCSR_MASK)
4602 {
4603 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4604 return iemRaiseGeneralProtectionFault0(pIemCpu);
4605 }
4606
4607 /*
4608 * Load the registers.
4609 */
4610 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
4611 * specific whether MXCSR and XMM0-XMM7 are restored. */
4612
4613 /* common for all formats */
4614 pCtx->fpu.FCW = pSrc->FCW;
4615 pCtx->fpu.FSW = pSrc->FSW;
4616 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4617 pCtx->fpu.FOP = pSrc->FOP;
4618 pCtx->fpu.MXCSR = fMXCSR;
4619 /* (MXCSR_MASK is read-only) */
4620 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4621 {
4622 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4623 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4624 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4625 pCtx->fpu.aRegs[i].au32[3] = 0;
4626 }
4627
4628 /* FPU IP, CS, DP and DS. */
4629 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4630 {
4631 pCtx->fpu.FPUIP = pSrc->FPUIP;
4632 pCtx->fpu.CS = pSrc->CS;
4633 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4634 pCtx->fpu.FPUDP = pSrc->FPUDP;
4635 pCtx->fpu.DS = pSrc->DS;
4636 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4637 }
4638 else
4639 {
4640 pCtx->fpu.FPUIP = pSrc->FPUIP;
4641 pCtx->fpu.CS = pSrc->CS;
4642 pCtx->fpu.Rsrvd1 = 0;
4643 pCtx->fpu.FPUDP = pSrc->FPUDP;
4644 pCtx->fpu.DS = pSrc->DS;
4645 pCtx->fpu.Rsrvd2 = 0;
4646 }
4647
4648 /* XMM registers. */
4649 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4650 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4651 || pIemCpu->uCpl != 0)
4652 {
4653 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4654 for (uint32_t i = 0; i < cXmmRegs; i++)
4655 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4656 }
4657
4658 /*
4659 * Commit the memory.
4660 */
4661 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4662 if (rcStrict != VINF_SUCCESS)
4663 return rcStrict;
4664
4665 iemHlpUsedFpu(pIemCpu);
4666 iemRegAddToRip(pIemCpu, cbInstr);
4667 return VINF_SUCCESS;
4668}
4669
4670
4671/**
4672 * Common routine for fnstenv and fnsave.
4673 *
4674 * @param uPtr Where to store the state.
4675 * @param pCtx The CPU context.
4676 */
4677static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4678{
4679 if (enmEffOpSize == IEMMODE_16BIT)
4680 {
4681 uPtr.pu16[0] = pCtx->fpu.FCW;
4682 uPtr.pu16[1] = pCtx->fpu.FSW;
4683 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4684 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4685 {
4686 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4687 * protected mode or long mode and we save it in real mode? And vice
4688 * versa? And with 32-bit operand size? I think the CPU stores the
4689 * effective address ((CS << 4) + IP) in the offset register and does not
4690 * do any address calculations here. */
4691 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4692 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4693 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4694 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4695 }
4696 else
4697 {
4698 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4699 uPtr.pu16[4] = pCtx->fpu.CS;
4700 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4701 uPtr.pu16[6] = pCtx->fpu.DS;
4702 }
4703 }
4704 else
4705 {
4706 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4707 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4708 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4709 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4710 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4711 {
4712 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4713 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4714 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4715 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4716 }
4717 else
4718 {
4719 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4720 uPtr.pu16[4*2] = pCtx->fpu.CS;
4721 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4722 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4723 uPtr.pu16[6*2] = pCtx->fpu.DS;
4724 }
4725 }
4726}
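/* In the 16-bit real/V86 layout above the 20-bit linear FPU instruction pointer is packed into two
 * words: the low 16 bits go into the offset word and bits 19:16 land in bits 15:12 of the next word,
 * sharing it with the 11-bit opcode (FOP). A minimal standalone sketch of that packing (kept under
 * #if 0 so it is not built): */
#if 0
#include <stdint.h>
#include <assert.h>

static void PackRealModeFpuIp(uint32_t uFpuIp, uint16_t uFop, uint16_t *pu16IpLow, uint16_t *pu16IpHiFop)
{
    *pu16IpLow   = (uint16_t)uFpuIp;                                       /* IP bits 15:0 */
    *pu16IpHiFop = (uint16_t)(((uFpuIp >> 4) & UINT16_C(0xf000)) | uFop);  /* IP 19:16 in 15:12, FOP in 10:0 */
}

int main(void)
{
    uint16_t u16Low, u16HiFop;
    PackRealModeFpuIp(UINT32_C(0xabcde), 0x123 /* FOP */, &u16Low, &u16HiFop);
    assert(u16Low   == 0xbcde);
    assert(u16HiFop == (0xa000 | 0x123));
    return 0;
}
#endif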
4727
4728
4729/**
4730 * Common routine for fldenv and frstor.
4731 *
4732 * @param uPtr Where to load the state from.
4733 * @param pCtx The CPU context.
4734 */
4735static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4736{
4737 if (enmEffOpSize == IEMMODE_16BIT)
4738 {
4739 pCtx->fpu.FCW = uPtr.pu16[0];
4740 pCtx->fpu.FSW = uPtr.pu16[1];
4741 pCtx->fpu.FTW = uPtr.pu16[2];
4742 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4743 {
4744 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4745 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4746 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4747 pCtx->fpu.CS = 0;
4748 pCtx->fpu.Rsrvd1= 0;
4749 pCtx->fpu.DS = 0;
4750 pCtx->fpu.Rsrvd2= 0;
4751 }
4752 else
4753 {
4754 pCtx->fpu.FPUIP = uPtr.pu16[3];
4755 pCtx->fpu.CS = uPtr.pu16[4];
4756 pCtx->fpu.Rsrvd1= 0;
4757 pCtx->fpu.FPUDP = uPtr.pu16[5];
4758 pCtx->fpu.DS = uPtr.pu16[6];
4759 pCtx->fpu.Rsrvd2= 0;
4760 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4761 }
4762 }
4763 else
4764 {
4765 pCtx->fpu.FCW = uPtr.pu16[0*2];
4766 pCtx->fpu.FSW = uPtr.pu16[1*2];
4767 pCtx->fpu.FTW = uPtr.pu16[2*2];
4768 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4769 {
4770 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4771 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4772 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4773 pCtx->fpu.CS = 0;
4774 pCtx->fpu.Rsrvd1= 0;
4775 pCtx->fpu.DS = 0;
4776 pCtx->fpu.Rsrvd2= 0;
4777 }
4778 else
4779 {
4780 pCtx->fpu.FPUIP = uPtr.pu32[3];
4781 pCtx->fpu.CS = uPtr.pu16[4*2];
4782 pCtx->fpu.Rsrvd1= 0;
4783 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4784 pCtx->fpu.FPUDP = uPtr.pu32[5];
4785 pCtx->fpu.DS = uPtr.pu16[6*2];
4786 pCtx->fpu.Rsrvd2= 0;
4787 }
4788 }
4789
4790 /* Make adjustments. */
4791 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4792 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4793 iemFpuRecalcExceptionStatus(pCtx);
4794 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4795 * exceptions are pending after loading the saved state? */
4796}
4797
4798
4799/**
4800 * Implements 'FNSTENV'.
4801 *
4802 * @param enmEffOpSize The operand size (only REX.W really matters).
4803 * @param iEffSeg The effective segment register for @a GCPtrEff.
4804 * @param GCPtrEffDst The address of the image.
4805 */
4806IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4807{
4808 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4809 RTPTRUNION uPtr;
4810 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4811 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4812 if (rcStrict != VINF_SUCCESS)
4813 return rcStrict;
4814
4815 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4816
4817 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4818 if (rcStrict != VINF_SUCCESS)
4819 return rcStrict;
4820
4821 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4822 iemRegAddToRip(pIemCpu, cbInstr);
4823 return VINF_SUCCESS;
4824}
4825
4826
4827/**
4828 * Implements 'FNSAVE'.
4829 *
4830 * @param GCPtrEffDst The address of the image.
4831 * @param enmEffOpSize The operand size.
 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4832 */
4833IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4834{
4835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4836 RTPTRUNION uPtr;
4837 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4838 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4839 if (rcStrict != VINF_SUCCESS)
4840 return rcStrict;
4841
4842 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4843 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4844 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4845 {
4846 paRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4847 paRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4848 paRegs[i].au16[4] = pCtx->fpu.aRegs[i].au16[4];
4849 }
4850
4851 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4852 if (rcStrict != VINF_SUCCESS)
4853 return rcStrict;
4854
4855 /*
4856 * Re-initialize the FPU.
4857 */
4858 pCtx->fpu.FCW = 0x37f;
4859 pCtx->fpu.FSW = 0;
4860 pCtx->fpu.FTW = 0x00; /* 0 - empty */
4861 pCtx->fpu.FPUDP = 0;
4862 pCtx->fpu.DS = 0;
4863 pCtx->fpu.Rsrvd2= 0;
4864 pCtx->fpu.FPUIP = 0;
4865 pCtx->fpu.CS = 0;
4866 pCtx->fpu.Rsrvd1= 0;
4867 pCtx->fpu.FOP = 0;
4868
4869 iemHlpUsedFpu(pIemCpu);
4870 iemRegAddToRip(pIemCpu, cbInstr);
4871 return VINF_SUCCESS;
4872}
4873
4874
4875
4876/**
4877 * Implements 'FLDENV'.
4878 *
4879 * @param enmEffOpSize The operand size (only REX.W really matters).
4880 * @param iEffSeg The effective segment register for @a GCPtrEff.
4881 * @param GCPtrEffSrc The address of the image.
4882 */
4883IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4884{
4885 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4886 RTCPTRUNION uPtr;
4887 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4888 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4889 if (rcStrict != VINF_SUCCESS)
4890 return rcStrict;
4891
4892 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4893
4894 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4895 if (rcStrict != VINF_SUCCESS)
4896 return rcStrict;
4897
4898 iemHlpUsedFpu(pIemCpu);
4899 iemRegAddToRip(pIemCpu, cbInstr);
4900 return VINF_SUCCESS;
4901}
4902
4903
4904/**
4905 * Implements 'FRSTOR'.
4906 *
4907 * @param GCPtrEffSrc The address of the image.
4908 * @param enmEffOpSize The operand size.
 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4909 */
4910IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4911{
4912 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4913 RTCPTRUNION uPtr;
4914 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4915 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4916 if (rcStrict != VINF_SUCCESS)
4917 return rcStrict;
4918
4919 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4920 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4921 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4922 {
4923 pCtx->fpu.aRegs[i].au32[0] = paRegs[i].au32[0];
4924 pCtx->fpu.aRegs[i].au32[1] = paRegs[i].au32[1];
4925 pCtx->fpu.aRegs[i].au32[2] = paRegs[i].au16[4];
4926 pCtx->fpu.aRegs[i].au32[3] = 0;
4927 }
4928
4929 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4930 if (rcStrict != VINF_SUCCESS)
4931 return rcStrict;
4932
4933 iemHlpUsedFpu(pIemCpu);
4934 iemRegAddToRip(pIemCpu, cbInstr);
4935 return VINF_SUCCESS;
4936}
4937
4938
4939/**
4940 * Implements 'FLDCW'.
4941 *
4942 * @param u16Fcw The new FCW.
4943 */
4944IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4945{
4946 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4947
4948 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4949 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4950 * (other than 6 and 7). Currently ignoring them. */
4951 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4952 * according to FSW. (This is what is currently implemented.) */
4953 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4954 iemFpuRecalcExceptionStatus(pCtx);
4955
4956 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4957 iemHlpUsedFpu(pIemCpu);
4958 iemRegAddToRip(pIemCpu, cbInstr);
4959 return VINF_SUCCESS;
4960}
4961
4962
4963
4964/**
4965 * Implements the underflow case of fxch.
4966 *
4967 * @param iStReg The other stack register.
4968 */
4969IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4970{
4971 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4972
4973 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4974 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4975 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4976
4977 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4978 * registers are read as QNaN and then exchanged. This could be
4979 * wrong... */
4980 if (pCtx->fpu.FCW & X86_FCW_IM)
4981 {
4982 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4983 {
4984 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4985 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4986 else
4987 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4988 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4989 }
4990 else
4991 {
4992 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4993 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4994 }
4995 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4996 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4997 }
4998 else
4999 {
5000 /* raise underflow exception, don't change anything. */
5001 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
5002 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5003 }
5004
5005 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5006 iemHlpUsedFpu(pIemCpu);
5007 iemRegAddToRip(pIemCpu, cbInstr);
5008 return VINF_SUCCESS;
5009}
5010
5011
5012/**
5013 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
5014 *
5015 * @param iStReg The other stack register.
 * @param pfnAImpl The FPU comparison worker to call.
 * @param fPop Whether to pop ST(0) afterwards.
5016 */
5017IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
5018{
5019 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5020 Assert(iStReg < 8);
5021
5022 /*
5023 * Raise exceptions.
5024 */
5025 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
5026 return iemRaiseDeviceNotAvailable(pIemCpu);
5027 uint16_t u16Fsw = pCtx->fpu.FSW;
5028 if (u16Fsw & X86_FSW_ES)
5029 return iemRaiseMathFault(pIemCpu);
5030
5031 /*
5032 * Check if any of the register accesses causes #SF + #IA.
5033 */
5034 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
5035 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
5036 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
5037 {
5038 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
5039 pCtx->fpu.FSW &= ~X86_FSW_C1;
5040 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
5041 if ( !(u16Fsw & X86_FSW_IE)
5042 || (pCtx->fpu.FCW & X86_FCW_IM) )
5043 {
5044 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5045 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5046 }
5047 }
5048 else if (pCtx->fpu.FCW & X86_FCW_IM)
5049 {
5050 /* Masked underflow. */
5051 pCtx->fpu.FSW &= ~X86_FSW_C1;
5052 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
5053 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
5054 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
5055 }
5056 else
5057 {
5058 /* Raise underflow - don't touch EFLAGS or TOP. */
5059 pCtx->fpu.FSW &= ~X86_FSW_C1;
5060 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5061 fPop = false;
5062 }
5063
5064 /*
5065 * Pop if necessary.
5066 */
5067 if (fPop)
5068 {
5069 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
5070 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
5071 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
5072 }
5073
5074 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
5075 iemHlpUsedFpu(pIemCpu);
5076 iemRegAddToRip(pIemCpu, cbInstr);
5077 return VINF_SUCCESS;
5078}
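/* The EFLAGS update above keeps only ZF/PF/CF from the comparison worker; that matches the documented
 * FCOMI/FUCOMI outcomes: greater clears all three, less sets CF, equal sets ZF, and unordered (or a
 * masked #IA) sets ZF, PF and CF. A minimal standalone sketch of that mapping with local EFLAGS
 * constants (kept under #if 0 so it is not built): */
#if 0
#include <stdint.h>
#include <assert.h>

#define MY_EFL_CF   UINT32_C(0x0001)
#define MY_EFL_PF   UINT32_C(0x0004)
#define MY_EFL_ZF   UINT32_C(0x0040)

typedef enum { FCMP_GREATER, FCMP_LESS, FCMP_EQUAL, FCMP_UNORDERED } FCMPRESULT;

static uint32_t FcomiFlags(FCMPRESULT enmRes)
{
    switch (enmRes)
    {
        case FCMP_GREATER:  return 0;
        case FCMP_LESS:     return MY_EFL_CF;
        case FCMP_EQUAL:    return MY_EFL_ZF;
        default:            return MY_EFL_ZF | MY_EFL_PF | MY_EFL_CF;   /* unordered / masked #IA */
    }
}

int main(void)
{
    assert(FcomiFlags(FCMP_LESS)      == MY_EFL_CF);
    assert(FcomiFlags(FCMP_UNORDERED) == (MY_EFL_ZF | MY_EFL_PF | MY_EFL_CF));
    return 0;
}
#endif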
5079
5080/** @} */
5081