VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 61010

Last change on this file since 61010 was 61010, checked in by vboxsync, 9 years ago

iemCImpl_retn: Use iemRegAddToRspEx, which takes a 16-bit addend, instead of iemRegAddToRsp, which only takes an 8-bit one. (Found by the verifier booting xpsp3, which encountered a 'retn 204h' instruction.)
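For context, 'retn imm16' pops the return address and then adds the 16-bit immediate to the stack pointer, so an 8-bit addend parameter would silently truncate a value such as 204h. A minimal sketch of the distinction, using hypothetical names rather than the actual IEM API:

    /* Hypothetical illustration, not the IEM API: the addend of 'retn imm16' must be
       carried as a 16-bit value; an 8-bit parameter would turn 0x204 into 0x04. */
    static void ExampleRetnAddSp(uint64_t *puRsp, uint16_t cbPop /* e.g. 0x204 */)
    {
        *puRsp += cbPop;
    }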

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 236.7 KB
1/* $Id: IEMAllCImpl.cpp.h 61010 2016-05-17 18:07:49Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23
24/**
25 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
26 *
27 * @returns Strict VBox status code.
28 *
29 * @param pIemCpu The IEM per CPU data.
30 * @param pCtx The register context.
31 * @param u16Port The port number.
32 * @param cbOperand The operand size.
33 */
34static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
35{
36 /* The TSS bits we're interested in are the same on 386 and AMD64. */
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
38 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
39 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
40 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
41
42 /*
43 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
44 */
45 Assert(!pCtx->tr.Attr.n.u1DescType);
46 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
47 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
48 {
49 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
50 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
51 return iemRaiseGeneralProtectionFault0(pIemCpu);
52 }
53
54 /*
55 * Read the bitmap offset (may #PF).
56 */
57 uint16_t offBitmap;
58 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offBitmap, UINT8_MAX,
59 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
60 if (rcStrict != VINF_SUCCESS)
61 {
62 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
63 return rcStrict;
64 }
65
66 /*
67 * We need to check the bit range from u16Port to (u16Port + cbOperand - 1).
68 * However, Intel describes the CPU as always reading two bytes, regardless of
69 * whether the bit range crosses a byte boundary. Thus the + 1 in the test below.
70 */
71 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
72 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
73 * for instance, sizeof(X86TSS32). */
74 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
75 {
76 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
77 offFirstBit, pCtx->tr.u32Limit));
78 return iemRaiseGeneralProtectionFault0(pIemCpu);
79 }
80
81 /*
82 * Read the necessary bits.
83 */
84 /** @todo Test the assertion in the Intel manual that the CPU reads two
85 * bytes. The question is how this works wrt #PF and #GP on the
86 * 2nd byte when it's not required. */
87 uint16_t bmBytes = UINT16_MAX;
88 rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
89 if (rcStrict != VINF_SUCCESS)
90 {
91 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
92 return rcStrict;
93 }
94
95 /*
96 * Perform the check.
97 */
98 uint16_t fPortMask = (1 << cbOperand) - 1;
99 bmBytes >>= (u16Port & 7);
100 if (bmBytes & fPortMask)
101 {
102 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
103 u16Port, cbOperand, bmBytes, fPortMask));
104 return iemRaiseGeneralProtectionFault0(pIemCpu);
105 }
106
107 return VINF_SUCCESS;
108}
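
/*
 * The permission check above boils down to simple bit arithmetic once the TSS
 * bytes are in hand. A self-contained restatement as a sketch, assuming the
 * TSS is available as a flat byte array; pau8Tss and cbTssLimit are
 * illustrative names, not IEM state.
 */
#if 0 /* illustrative only */
static bool ExampleIoPortDenied(uint8_t const *pau8Tss, uint32_t cbTssLimit /* inclusive */,
                                uint16_t offIoBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    uint32_t offFirstByte = offIoBitmap + u16Port / 8;
    if (offFirstByte + 1 > cbTssLimit)              /* both bytes must be within the limit */
        return true;                                /* -> #GP(0) */
    uint16_t bmBytes   = pau8Tss[offFirstByte] | ((uint16_t)pau8Tss[offFirstByte + 1] << 8);
    uint16_t fPortMask = (1 << cbOperand) - 1;      /* 1, 3 or 15 for 1/2/4 byte accesses */
    return ((bmBytes >> (u16Port & 7)) & fPortMask) != 0; /* any set bit denies the access */
}
#endif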
109
110
111/**
112 * Checks if we are allowed to access the given I/O port, raising the
113 * appropriate exceptions if we aren't (or if the I/O bitmap is not
114 * accessible).
115 *
116 * @returns Strict VBox status code.
117 *
118 * @param pIemCpu The IEM per CPU data.
119 * @param pCtx The register context.
120 * @param u16Port The port number.
121 * @param cbOperand The operand size.
122 */
123DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
124{
125 X86EFLAGS Efl;
126 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
127 if ( (pCtx->cr0 & X86_CR0_PE)
128 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
129 || Efl.Bits.u1VM) )
130 return iemHlpCheckPortIOPermissionBitmap(pIemCpu, pCtx, u16Port, cbOperand);
131 return VINF_SUCCESS;
132}
133
134
135#if 0
136/**
137 * Calculates the parity bit.
138 *
139 * @returns true if the bit is set, false if not.
140 * @param u8Result The least significant byte of the result.
141 */
142static bool iemHlpCalcParityFlag(uint8_t u8Result)
143{
144 /*
145 * Parity is set if the number of bits in the least significant byte of
146 * the result is even.
147 */
148 uint8_t cBits;
149 cBits = u8Result & 1; /* 0 */
150 u8Result >>= 1;
151 cBits += u8Result & 1;
152 u8Result >>= 1;
153 cBits += u8Result & 1;
154 u8Result >>= 1;
155 cBits += u8Result & 1;
156 u8Result >>= 1;
157 cBits += u8Result & 1; /* 4 */
158 u8Result >>= 1;
159 cBits += u8Result & 1;
160 u8Result >>= 1;
161 cBits += u8Result & 1;
162 u8Result >>= 1;
163 cBits += u8Result & 1;
164 return !(cBits & 1);
165}
166#endif /* not used */
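
/*
 * A more compact equivalent of the disabled helper above, sketched on the
 * assumption that a GCC/Clang popcount builtin is available; illustrative
 * only and likewise not used.
 */
#if 0
static bool ExampleCalcParityFlag(uint8_t u8Result)
{
    return !(__builtin_popcount(u8Result) & 1);   /* PF is set when the set-bit count is even */
}
#endif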
167
168
169/**
170 * Updates the specified flags according to an 8-bit result.
171 *
172 * @param pIemCpu The IEM state of the calling EMT.
173 * @param u8Result The result to set the flags according to.
174 * @param fToUpdate The flags to update.
175 * @param fUndefined The flags that are specified as undefined.
176 */
177static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
178{
179 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
180
181 uint32_t fEFlags = pCtx->eflags.u;
182 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
183 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
184 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
185#ifdef IEM_VERIFICATION_MODE_FULL
186 pIemCpu->fUndefinedEFlags |= fUndefined;
187#endif
188}
189
190
191/**
192 * Helper used by iret.
193 *
194 * @param uCpl The new CPL.
195 * @param pSReg Pointer to the segment register.
196 */
197static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
198{
199#ifdef VBOX_WITH_RAW_MODE_NOT_R0
200 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
201 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
202#else
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
204#endif
205
206 if ( uCpl > pSReg->Attr.n.u2Dpl
207 && pSReg->Attr.n.u1DescType /* code or data, not system */
208 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
210 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, 0);
211}
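
/*
 * The rule applied above, condensed into a hypothetical standalone predicate
 * (illustrative only, not part of IEM): on IRET to a less privileged level,
 * DS/ES/FS/GS must be nulled if they are data or non-conforming code segments
 * whose DPL is below the new CPL.
 */
#if 0
static bool ExampleMustNullSelectorOnIret(uint8_t uNewCpl, X86DESCATTR Attr)
{
    return uNewCpl > Attr.n.u2Dpl
        && Attr.n.u1DescType                       /* code or data, not system */
        && (Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
           != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF); /* not conforming code */
}
#endif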
212
213
214/**
215 * Indicates that we have modified the FPU state.
216 *
217 * @param pIemCpu The IEM state of the calling EMT.
218 */
219DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
220{
221 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
222}
223
224/** @} */
225
226/** @name C Implementations
227 * @{
228 */
229
230/**
231 * Implements a 16-bit popa.
232 */
233IEM_CIMPL_DEF_0(iemCImpl_popa_16)
234{
235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
236 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
237 RTGCPTR GCPtrLast = GCPtrStart + 15;
238 VBOXSTRICTRC rcStrict;
239
240 /*
241 * The docs are a bit hard to comprehend here, but it looks like we wrap
242 * around in real mode as long as none of the individual pops crosses the
243 * end of the stack segment. In protected mode we check the whole access
244 * in one go. For efficiency, only do the word-by-word thing if we're in
245 * danger of wrapping around.
246 */
247 /** @todo do popa boundary / wrap-around checks. */
248 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
249 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
250 {
251 /* word-by-word */
252 RTUINT64U TmpRsp;
253 TmpRsp.u = pCtx->rsp;
254 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 {
261 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
262 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
263 }
264 if (rcStrict == VINF_SUCCESS)
265 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
266 if (rcStrict == VINF_SUCCESS)
267 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 {
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint16_t const *pa16Mem = NULL;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
283 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
284 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
285 /* skip sp */
286 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
287 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
288 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
289 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pIemCpu, pCtx, 16);
294 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
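
/*
 * For reference, the 16-byte frame that the fast path above maps and indexes
 * with pa16Mem[7 - X86_GREG_xXX]; the struct is illustrative only (lowest
 * address first, i.e. the order POPA consumes, the reverse of the PUSHA
 * push order).
 */
#if 0
typedef struct EXAMPLEPUSHAFRAME16
{
    uint16_t di;    /* [0] = pa16Mem[7 - X86_GREG_xDI], popped first        */
    uint16_t si;    /* [1]                                                  */
    uint16_t bp;    /* [2]                                                  */
    uint16_t sp;    /* [3] written by PUSHA, skipped by POPA                */
    uint16_t bx;    /* [4]                                                  */
    uint16_t dx;    /* [5]                                                  */
    uint16_t cx;    /* [6]                                                  */
    uint16_t ax;    /* [7] pushed first by PUSHA, so at the highest address */
} EXAMPLEPUSHAFRAME16;
#endif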
300
301
302/**
303 * Implements a 32-bit popa.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_popa_32)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
309 RTGCPTR GCPtrLast = GCPtrStart + 31;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual pops crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do popa boundary / wrap-around checks. */
320 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
321 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 {
333 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
334 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
335 }
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 {
344#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
345 pCtx->rdi &= UINT32_MAX;
346 pCtx->rsi &= UINT32_MAX;
347 pCtx->rbp &= UINT32_MAX;
348 pCtx->rbx &= UINT32_MAX;
349 pCtx->rdx &= UINT32_MAX;
350 pCtx->rcx &= UINT32_MAX;
351 pCtx->rax &= UINT32_MAX;
352#endif
353 pCtx->rsp = TmpRsp.u;
354 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
355 }
356 }
357 else
358 {
359 uint32_t const *pa32Mem;
360 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
361 if (rcStrict == VINF_SUCCESS)
362 {
363 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
364 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
365 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
366 /* skip esp */
367 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
368 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
369 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
370 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
371 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
372 if (rcStrict == VINF_SUCCESS)
373 {
374 iemRegAddToRsp(pIemCpu, pCtx, 32);
375 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
376 }
377 }
378 }
379 return rcStrict;
380}
381
382
383/**
384 * Implements a 16-bit pusha.
385 */
386IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
387{
388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
389 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
390 RTGCPTR GCPtrBottom = GCPtrTop - 15;
391 VBOXSTRICTRC rcStrict;
392
393 /*
394 * The docs are a bit hard to comprehend here, but it looks like we wrap
395 * around in real mode as long as none of the individual pushes crosses the
396 * end of the stack segment. In protected mode we check the whole access
397 * in one go. For efficiency, only do the word-by-word thing if we're in
398 * danger of wrapping around.
399 */
400 /** @todo do pusha boundary / wrap-around checks. */
401 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
402 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
403 {
404 /* word-by-word */
405 RTUINT64U TmpRsp;
406 TmpRsp.u = pCtx->rsp;
407 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 {
424 pCtx->rsp = TmpRsp.u;
425 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
426 }
427 }
428 else
429 {
430 GCPtrBottom--;
431 uint16_t *pa16Mem = NULL;
432 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
433 if (rcStrict == VINF_SUCCESS)
434 {
435 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
436 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
437 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
438 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
439 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
440 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
441 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
442 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
443 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 iemRegSubFromRsp(pIemCpu, pCtx, 16);
447 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
448 }
449 }
450 }
451 return rcStrict;
452}
453
454
455/**
456 * Implements a 32-bit pusha.
457 */
458IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
459{
460 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
461 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
462 RTGCPTR GCPtrBottom = GCPtrTop - 31;
463 VBOXSTRICTRC rcStrict;
464
465 /*
466 * The docs are a bit hard to comprehend here, but it looks like we wrap
467 * around in real mode as long as none of the individual pushes crosses the
468 * end of the stack segment. In protected mode we check the whole access
469 * in one go. For efficiency, only do the word-by-word thing if we're in
470 * danger of wrapping around.
471 */
472 /** @todo do pusha boundary / wrap-around checks. */
473 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
474 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
475 {
476 /* word-by-word */
477 RTUINT64U TmpRsp;
478 TmpRsp.u = pCtx->rsp;
479 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
480 if (rcStrict == VINF_SUCCESS)
481 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
482 if (rcStrict == VINF_SUCCESS)
483 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 {
496 pCtx->rsp = TmpRsp.u;
497 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
498 }
499 }
500 else
501 {
502 GCPtrBottom--;
503 uint32_t *pa32Mem;
504 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
505 if (rcStrict == VINF_SUCCESS)
506 {
507 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
508 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
509 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
510 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
511 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
512 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
513 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
514 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
515 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 iemRegSubFromRsp(pIemCpu, pCtx, 32);
519 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
520 }
521 }
522 }
523 return rcStrict;
524}
525
526
527/**
528 * Implements pushf.
529 *
530 *
531 * @param enmEffOpSize The effective operand size.
532 */
533IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
534{
535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
536
537 /*
538 * If we're in V8086 mode some care is required (which is why we're
539 * doing this in a C implementation).
540 */
541 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
542 if ( (fEfl & X86_EFL_VM)
543 && X86_EFL_GET_IOPL(fEfl) != 3 )
544 {
545 Assert(pCtx->cr0 & X86_CR0_PE);
546 if ( enmEffOpSize != IEMMODE_16BIT
547 || !(pCtx->cr4 & X86_CR4_VME))
548 return iemRaiseGeneralProtectionFault0(pIemCpu);
549 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
550 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
551 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
552 }
553
554 /*
555 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
556 */
557 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
558
559 VBOXSTRICTRC rcStrict;
560 switch (enmEffOpSize)
561 {
562 case IEMMODE_16BIT:
563 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
564 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_186)
565 fEfl |= UINT16_C(0xf000);
566 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
567 break;
568 case IEMMODE_32BIT:
569 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
570 break;
571 case IEMMODE_64BIT:
572 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
573 break;
574 IEM_NOT_REACHED_DEFAULT_CASE_RET();
575 }
576 if (rcStrict != VINF_SUCCESS)
577 return rcStrict;
578
579 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
580 return VINF_SUCCESS;
581}
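
/*
 * The VME special case above substitutes VIF for IF in the pushed image; a
 * hypothetical standalone restatement (illustrative only, not IEM API):
 */
#if 0
static uint16_t ExampleVmePushfImage(uint32_t fEfl)
{
    uint32_t fImage = fEfl & ~X86_EFL_IF;            /* drop the real IF...                          */
    fImage |= (fEfl & X86_EFL_VIF) >> (19 - 9);      /* ...and put VIF (bit 19) in its place (bit 9) */
    return (uint16_t)fImage;                         /* 16-bit image; RF and VM are above bit 15     */
}
#endif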
582
583
584/**
585 * Implements popf.
586 *
587 * @param enmEffOpSize The effective operand size.
588 */
589IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
590{
591 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
592 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
593 VBOXSTRICTRC rcStrict;
594 uint32_t fEflNew;
595
596 /*
597 * V8086 is special as usual.
598 */
599 if (fEflOld & X86_EFL_VM)
600 {
601 /*
602 * Almost anything goes if IOPL is 3.
603 */
604 if (X86_EFL_GET_IOPL(fEflOld) == 3)
605 {
606 switch (enmEffOpSize)
607 {
608 case IEMMODE_16BIT:
609 {
610 uint16_t u16Value;
611 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
612 if (rcStrict != VINF_SUCCESS)
613 return rcStrict;
614 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
615 break;
616 }
617 case IEMMODE_32BIT:
618 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
619 if (rcStrict != VINF_SUCCESS)
620 return rcStrict;
621 break;
622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
623 }
624
625 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
626 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
627 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
628 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
629 }
630 /*
631 * Interrupt flag virtualization with CR4.VME=1.
632 */
633 else if ( enmEffOpSize == IEMMODE_16BIT
634 && (pCtx->cr4 & X86_CR4_VME) )
635 {
636 uint16_t u16Value;
637 RTUINT64U TmpRsp;
638 TmpRsp.u = pCtx->rsp;
639 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
640 if (rcStrict != VINF_SUCCESS)
641 return rcStrict;
642
643 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
644 * or before? */
645 if ( ( (u16Value & X86_EFL_IF)
646 && (fEflOld & X86_EFL_VIP))
647 || (u16Value & X86_EFL_TF) )
648 return iemRaiseGeneralProtectionFault0(pIemCpu);
649
650 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
651 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
652 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
653 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
654
655 pCtx->rsp = TmpRsp.u;
656 }
657 else
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 }
661 /*
662 * Not in V8086 mode.
663 */
664 else
665 {
666 /* Pop the flags. */
667 switch (enmEffOpSize)
668 {
669 case IEMMODE_16BIT:
670 {
671 uint16_t u16Value;
672 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
673 if (rcStrict != VINF_SUCCESS)
674 return rcStrict;
675 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
676
677 /*
678 * Ancient CPU adjustments:
679 * - 8086, 80186, V20/30:
680 * Fixed bits 15:12 are not kept correctly internally, mostly for
681 * practical reasons (masking below). We add them when pushing flags.
682 * - 80286:
683 * The NT and IOPL flags cannot be popped from real mode and are
684 * therefore always zero (since a 286 can never exit from PM and
685 * their initial value is zero). This changed on a 386 and can
686 * therefore be used to detect 286 or 386 CPU in real mode.
687 */
688 if ( IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_286
689 && !(pCtx->cr0 & X86_CR0_PE) )
690 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
691 break;
692 }
693 case IEMMODE_32BIT:
694 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
695 if (rcStrict != VINF_SUCCESS)
696 return rcStrict;
697 break;
698 case IEMMODE_64BIT:
699 {
700 uint64_t u64Value;
701 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
702 if (rcStrict != VINF_SUCCESS)
703 return rcStrict;
704 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
705 break;
706 }
707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
708 }
709
710 /* Merge them with the current flags. */
711 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
712 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
713 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
714 || pIemCpu->uCpl == 0)
715 {
716 fEflNew &= fPopfBits;
717 fEflNew |= ~fPopfBits & fEflOld;
718 }
719 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
720 {
721 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
722 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
723 }
724 else
725 {
726 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
727 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
728 }
729 }
730
731 /*
732 * Commit the flags.
733 */
734 Assert(fEflNew & RT_BIT_32(1));
735 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
736 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
737
738 return VINF_SUCCESS;
739}
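
/*
 * The protected-mode merge rules above, condensed into one hypothetical
 * helper (illustrative only; behaviourally equivalent to the three-way
 * branch, since bits that may not change are simply taken from the old
 * value):
 */
#if 0
static uint32_t ExamplePopfMerge(uint32_t fEflOld, uint32_t fEflNew, uint8_t uCpl, uint32_t fPopfBits)
{
    uint32_t fMask = fPopfBits;                      /* CPL 0 may change everything POPF can touch */
    if (uCpl != 0)
    {
        fMask &= ~X86_EFL_IOPL;                      /* only CPL 0 may change IOPL                 */
        if (uCpl > X86_EFL_GET_IOPL(fEflOld))
            fMask &= ~X86_EFL_IF;                    /* and only CPL <= IOPL may change IF         */
    }
    return (fEflNew & fMask) | (fEflOld & ~fMask);
}
#endif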
740
741
742/**
743 * Implements a 16-bit indirect call.
744 *
745 * @param uNewPC The new program counter (RIP) value (loaded from the
746 * operand).
747 * @param enmEffOpSize The effective operand size.
748 */
749IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
750{
751 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
752 uint16_t uOldPC = pCtx->ip + cbInstr;
753 if (uNewPC > pCtx->cs.u32Limit)
754 return iemRaiseGeneralProtectionFault0(pIemCpu);
755
756 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
757 if (rcStrict != VINF_SUCCESS)
758 return rcStrict;
759
760 pCtx->rip = uNewPC;
761 pCtx->eflags.Bits.u1RF = 0;
762 return VINF_SUCCESS;
763}
764
765
766/**
767 * Implements a 16-bit relative call.
768 *
769 * @param offDisp The displacement offset.
770 */
771IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
772{
773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
774 uint16_t uOldPC = pCtx->ip + cbInstr;
775 uint16_t uNewPC = uOldPC + offDisp;
776 if (uNewPC > pCtx->cs.u32Limit)
777 return iemRaiseGeneralProtectionFault0(pIemCpu);
778
779 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
780 if (rcStrict != VINF_SUCCESS)
781 return rcStrict;
782
783 pCtx->rip = uNewPC;
784 pCtx->eflags.Bits.u1RF = 0;
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Implements a 32-bit indirect call.
791 *
792 * @param uNewPC The new program counter (RIP) value (loaded from the
793 * operand).
794 * @param enmEffOpSize The effective operand size.
795 */
796IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 uint32_t uOldPC = pCtx->eip + cbInstr;
800 if (uNewPC > pCtx->cs.u32Limit)
801 return iemRaiseGeneralProtectionFault0(pIemCpu);
802
803 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
804 if (rcStrict != VINF_SUCCESS)
805 return rcStrict;
806
807#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
808 /*
809 * CSAM hook for recording interesting indirect calls.
810 */
811 if ( !pCtx->eflags.Bits.u1IF
812 && (pCtx->cr0 & X86_CR0_PG)
813 && !CSAMIsEnabled(IEMCPU_TO_VM(pIemCpu))
814 && pIemCpu->uCpl == 0)
815 {
816 EMSTATE enmState = EMGetState(IEMCPU_TO_VMCPU(pIemCpu));
817 if ( enmState == EMSTATE_IEM_THEN_REM
818 || enmState == EMSTATE_IEM
819 || enmState == EMSTATE_REM)
820 CSAMR3RecordCallAddress(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
821 }
822#endif
823
824 pCtx->rip = uNewPC;
825 pCtx->eflags.Bits.u1RF = 0;
826 return VINF_SUCCESS;
827}
828
829
830/**
831 * Implements a 32-bit relative call.
832 *
833 * @param offDisp The displacement offset.
834 */
835IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
836{
837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
838 uint32_t uOldPC = pCtx->eip + cbInstr;
839 uint32_t uNewPC = uOldPC + offDisp;
840 if (uNewPC > pCtx->cs.u32Limit)
841 return iemRaiseGeneralProtectionFault0(pIemCpu);
842
843 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
844 if (rcStrict != VINF_SUCCESS)
845 return rcStrict;
846
847 pCtx->rip = uNewPC;
848 pCtx->eflags.Bits.u1RF = 0;
849 return VINF_SUCCESS;
850}
851
852
853/**
854 * Implements a 64-bit indirect call.
855 *
856 * @param uNewPC The new program counter (RIP) value (loaded from the
857 * operand).
858 * @param enmEffOpSize The effective operand size.
859 */
860IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
861{
862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
863 uint64_t uOldPC = pCtx->rip + cbInstr;
864 if (!IEM_IS_CANONICAL(uNewPC))
865 return iemRaiseGeneralProtectionFault0(pIemCpu);
866
867 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
868 if (rcStrict != VINF_SUCCESS)
869 return rcStrict;
870
871 pCtx->rip = uNewPC;
872 pCtx->eflags.Bits.u1RF = 0;
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Implements a 64-bit relative call.
879 *
880 * @param offDisp The displacement offset.
881 */
882IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
883{
884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
885 uint64_t uOldPC = pCtx->rip + cbInstr;
886 uint64_t uNewPC = uOldPC + offDisp;
887 if (!IEM_IS_CANONICAL(uNewPC))
888 return iemRaiseNotCanonical(pIemCpu);
889
890 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
891 if (rcStrict != VINF_SUCCESS)
892 return rcStrict;
893
894 pCtx->rip = uNewPC;
895 pCtx->eflags.Bits.u1RF = 0;
896 return VINF_SUCCESS;
897}
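
/*
 * The canonical checks in the 64-bit call paths above reject targets whose
 * upper bits are not a sign extension of bit 47. A minimal sketch, assuming
 * a 48-bit virtual address width (IEM_IS_CANONICAL is the real macro; the
 * helper below is illustrative only):
 */
#if 0
static bool ExampleIsCanonical(uint64_t uAddr)
{
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr; /* bits 63:48 must mirror bit 47 */
}
#endif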
898
899
900/**
901 * Implements far jumps and calls thru task segments (TSS).
902 *
903 * @param uSel The selector.
904 * @param enmBranch The kind of branching we're performing.
905 * @param enmEffOpSize The effective operand size.
906 * @param pDesc The descriptor corresponding to @a uSel. The type is
907 * a task segment (TSS), not a task gate.
908 */
909IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
910{
911#ifndef IEM_IMPLEMENTS_TASKSWITCH
912 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
913#else
914 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
915 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
916 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
917
918 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
919 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
920 {
921 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
922 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
923 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
924 }
925
926 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
927 * far calls (see iemCImpl_callf). Most likely in both cases it should be
928 * checked here, need testcases. */
929 if (!pDesc->Legacy.Gen.u1Present)
930 {
931 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
932 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
933 }
934
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t uNextEip = pCtx->eip + cbInstr;
937 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
938 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
939#endif
940}
941
942
943/**
944 * Implements far jumps and calls thru task gates.
945 *
946 * @param uSel The selector.
947 * @param enmBranch The kind of branching we're performing.
948 * @param enmEffOpSize The effective operand size.
949 * @param pDesc The descriptor corresponding to @a uSel. The type is
950 * task gate.
951 */
952IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
953{
954#ifndef IEM_IMPLEMENTS_TASKSWITCH
955 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
956#else
957 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
958
959 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
960 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
961 {
962 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
963 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
965 }
966
967 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
968 * far calls (see iemCImpl_callf). Most likely in both cases it should be
969 * checked here, need testcases. */
970 if (!pDesc->Legacy.Gen.u1Present)
971 {
972 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
973 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
974 }
975
976 /*
977 * Fetch the new TSS descriptor from the GDT.
978 */
979 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
980 if (uSelTss & X86_SEL_LDT)
981 {
982 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
983 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
984 }
985
986 IEMSELDESC TssDesc;
987 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelTss, X86_XCPT_GP);
988 if (rcStrict != VINF_SUCCESS)
989 return rcStrict;
990
991 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
992 {
993 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
994 TssDesc.Legacy.Gate.u4Type));
995 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
996 }
997
998 if (!TssDesc.Legacy.Gate.u1Present)
999 {
1000 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1001 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1002 }
1003
1004 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1005 uint32_t uNextEip = pCtx->eip + cbInstr;
1006 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1007 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1008#endif
1009}
1010
1011
1012/**
1013 * Implements far jumps and calls thru call gates.
1014 *
1015 * @param uSel The selector.
1016 * @param enmBranch The kind of branching we're performing.
1017 * @param enmEffOpSize The effective operand size.
1018 * @param pDesc The descriptor corresponding to @a uSel. The type is
1019 * call gate.
1020 */
1021IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1022{
1023#ifndef IEM_IMPLEMENTS_CALLGATE
1024 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1025#else
1026 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1027 * inter-privilege calls and are much more complex.
1028 *
1029 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1030 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1031 * must be 16-bit or 32-bit.
1032 */
1033 /** @todo: effective operand size is probably irrelevant here, only the
1034 * call gate bitness matters??
1035 */
1036 VBOXSTRICTRC rcStrict;
1037 RTPTRUNION uPtrRet;
1038 uint64_t uNewRsp;
1039 uint64_t uNewRip;
1040 uint64_t u64Base;
1041 uint32_t cbLimit;
1042 RTSEL uNewCS;
1043 IEMSELDESC DescCS;
1044 PCPUMCTX pCtx;
1045
1046 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1047 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1048 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1049 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1050
1051 /* Determine the new instruction pointer from the gate descriptor. */
1052 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1053 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1054 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1055
1056 /* Perform DPL checks on the gate descriptor. */
1057 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
1058 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1059 {
1060 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1061 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
1062 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1063 }
1064
1065 /** @todo does this catch NULL selectors, too? */
1066 if (!pDesc->Legacy.Gen.u1Present)
1067 {
1068 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1069 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1070 }
1071
1072 /*
1073 * Fetch the target CS descriptor from the GDT or LDT.
1074 */
1075 uNewCS = pDesc->Legacy.Gate.u16Sel;
1076 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_GP);
1077 if (rcStrict != VINF_SUCCESS)
1078 return rcStrict;
1079
1080 /* Target CS must be a code selector. */
1081 if ( !DescCS.Legacy.Gen.u1DescType
1082 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1083 {
1084 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1085 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1086 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1087 }
1088
1089 /* Privilege checks on target CS. */
1090 if (enmBranch == IEMBRANCH_JUMP)
1091 {
1092 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1093 {
1094 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1095 {
1096 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1097 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1098 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1099 }
1100 }
1101 else
1102 {
1103 if (DescCS.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1104 {
1105 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1106 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1107 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1108 }
1109 }
1110 }
1111 else
1112 {
1113 Assert(enmBranch == IEMBRANCH_CALL);
1114 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1115 {
1116 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1117 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1119 }
1120 }
1121
1122 /* Additional long mode checks. */
1123 if (IEM_IS_LONG_MODE(pIemCpu))
1124 {
1125 if (!DescCS.Legacy.Gen.u1Long)
1126 {
1127 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1129 }
1130
1131 /* L vs D. */
1132 if ( DescCS.Legacy.Gen.u1Long
1133 && DescCS.Legacy.Gen.u1DefBig)
1134 {
1135 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1137 }
1138 }
1139
1140 if (!DescCS.Legacy.Gate.u1Present)
1141 {
1142 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1143 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
1144 }
1145
1146 pCtx = pIemCpu->CTX_SUFF(pCtx);
1147
1148 if (enmBranch == IEMBRANCH_JUMP)
1149 {
1150 /** @todo: This is very similar to regular far jumps; merge! */
1151 /* Jumps are fairly simple... */
1152
1153 /* Chop the high bits off if 16-bit gate (Intel says so). */
1154 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1155 uNewRip = (uint16_t)uNewRip;
1156
1157 /* Limit check for non-long segments. */
1158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1159 if (DescCS.Legacy.Gen.u1Long)
1160 u64Base = 0;
1161 else
1162 {
1163 if (uNewRip > cbLimit)
1164 {
1165 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1166 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1167 }
1168 u64Base = X86DESC_BASE(&DescCS.Legacy);
1169 }
1170
1171 /* Canonical address check. */
1172 if (!IEM_IS_CANONICAL(uNewRip))
1173 {
1174 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1175 return iemRaiseNotCanonical(pIemCpu);
1176 }
1177
1178 /*
1179 * Ok, everything checked out fine. Now set the accessed bit before
1180 * committing the result into CS, CSHID and RIP.
1181 */
1182 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1183 {
1184 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1185 if (rcStrict != VINF_SUCCESS)
1186 return rcStrict;
1187 /** @todo check what VT-x and AMD-V does. */
1188 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1189 }
1190
1191 /* commit */
1192 pCtx->rip = uNewRip;
1193 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1194 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1195 pCtx->cs.ValidSel = pCtx->cs.Sel;
1196 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1197 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1198 pCtx->cs.u32Limit = cbLimit;
1199 pCtx->cs.u64Base = u64Base;
1200 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1201 }
1202 else
1203 {
1204 Assert(enmBranch == IEMBRANCH_CALL);
1205 /* Calls are much more complicated. */
1206
1207 if (DescCS.Legacy.Gen.u2Dpl < pIemCpu->uCpl)
1208 {
1209 uint16_t offNewStack; /* Offset of new stack in TSS. */
1210 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1211 uint8_t uNewCSDpl;
1212 uint8_t cbWords;
1213 RTSEL uNewSS;
1214 RTSEL uOldSS;
1215 uint64_t uOldRsp;
1216 IEMSELDESC DescSS;
1217 RTPTRUNION uPtrTSS;
1218 RTGCPTR GCPtrTSS;
1219 RTPTRUNION uPtrParmWds;
1220 RTGCPTR GCPtrParmWds;
1221
1222 /* More privilege. This is the fun part. */
1223 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1224
1225 /*
1226 * Determine new SS:rSP from the TSS.
1227 */
1228 Assert(!pCtx->tr.Attr.n.u1DescType);
1229
1230 /* Figure out where the new stack pointer is stored in the TSS. */
1231 uNewCSDpl = uNewCS & X86_SEL_RPL;
1232 if (!IEM_IS_LONG_MODE(pIemCpu))
1233 {
1234 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1235 {
1236 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1237 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1238 }
1239 else
1240 {
1241 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1242 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1243 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1244 }
1245 }
1246 else
1247 {
1248 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1249 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1250 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1251 }
1252
1253 /* Check against TSS limit. */
1254 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1255 {
1256 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1257 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, pCtx->tr.Sel);
1258 }
1259
1260 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1261 rcStrict = iemMemMap(pIemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1262 if (rcStrict != VINF_SUCCESS)
1263 {
1264 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1265 return rcStrict;
1266 }
1267
1268 if (!IEM_IS_LONG_MODE(pIemCpu))
1269 {
1270 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1271 {
1272 uNewRsp = uPtrTSS.pu32[0];
1273 uNewSS = uPtrTSS.pu16[2];
1274 }
1275 else
1276 {
1277 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1278 uNewRsp = uPtrTSS.pu16[0];
1279 uNewSS = uPtrTSS.pu16[1];
1280 }
1281 }
1282 else
1283 {
1284 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1285 /* SS will be a NULL selector, but that's valid. */
1286 uNewRsp = uPtrTSS.pu64[0];
1287 uNewSS = uNewCSDpl;
1288 }
1289
1290 /* Done with the TSS now. */
1291 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1292 if (rcStrict != VINF_SUCCESS)
1293 {
1294 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1295 return rcStrict;
1296 }
1297
1298 /* Only used outside of long mode. */
1299 cbWords = pDesc->Legacy.Gate.u4ParmCount;
1300
1301 /* If EFER.LMA is 0, there's extra work to do. */
1302 if (!IEM_IS_LONG_MODE(pIemCpu))
1303 {
1304 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1305 {
1306 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1307 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1308 }
1309
1310 /* Grab the new SS descriptor. */
1311 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1312 if (rcStrict != VINF_SUCCESS)
1313 return rcStrict;
1314
1315 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1316 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1317 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1318 {
1319 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1320 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1321 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1322 }
1323
1324 /* Ensure new SS is a writable data segment. */
1325 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1326 {
1327 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1328 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1329 }
1330
1331 if (!DescSS.Legacy.Gen.u1Present)
1332 {
1333 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1334 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
1335 }
1336 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1337 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1338 else
1339 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1340 }
1341 else
1342 {
1343 /* Just grab the new (NULL) SS descriptor. */
1344 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1345 if (rcStrict != VINF_SUCCESS)
1346 return rcStrict;
1347
1348 cbNewStack = sizeof(uint64_t) * 4;
1349 }
1350
1351 /** @todo: According to Intel, new stack is checked for enough space first,
1352 * then switched. According to AMD, the stack is switched first and
1353 * then pushes might fault!
1354 */
1355
1356 /** @todo: According to AMD, CS is loaded first, then SS.
1357 * According to Intel, it's the other way around!?
1358 */
1359
1360 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1361
1362 /* Set the accessed bit before committing new SS. */
1363 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1364 {
1365 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
1366 if (rcStrict != VINF_SUCCESS)
1367 return rcStrict;
1368 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1369 }
1370
1371 /* Remember the old SS:rSP and their linear address. */
1372 uOldSS = pCtx->ss.Sel;
1373 uOldRsp = pCtx->rsp;
1374
1375 GCPtrParmWds = pCtx->ss.u64Base + pCtx->rsp;
1376
1377 /* Commit new SS:rSP. */
1378 pCtx->ss.Sel = uNewSS;
1379 pCtx->ss.ValidSel = uNewSS;
1380 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1381 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1382 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1383 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1384 pCtx->rsp = uNewRsp;
1385 pIemCpu->uCpl = uNewCSDpl;
1386 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
1387 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
1388
1389 /* Check new stack - may #SS(NewSS). */
1390 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbNewStack,
1391 &uPtrRet.pv, &uNewRsp);
1392 if (rcStrict != VINF_SUCCESS)
1393 {
1394 Log(("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1395 return rcStrict;
1396 }
1397
1398 if (!IEM_IS_LONG_MODE(pIemCpu))
1399 {
1400 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1401 {
1402 /* Push the old CS:rIP. */
1403 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1404 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1405
1406 /* Map the relevant chunk of the old stack. */
1407 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1408 if (rcStrict != VINF_SUCCESS)
1409 {
1410 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1411 return rcStrict;
1412 }
1413
1414 /* Copy the parameter (d)words. */
1415 for (int i = 0; i < cbWords; ++i)
1416 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1417
1418 /* Unmap the old stack. */
1419 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1420 if (rcStrict != VINF_SUCCESS)
1421 {
1422 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1423 return rcStrict;
1424 }
1425
1426 /* Push the old SS:rSP. */
1427 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1428 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1429 }
1430 else
1431 {
1432 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1433
1434 /* Push the old CS:rIP. */
1435 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1436 uPtrRet.pu16[1] = pCtx->cs.Sel;
1437
1438 /* Map the relevant chunk of the old stack. */
1439 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1440 if (rcStrict != VINF_SUCCESS)
1441 {
1442 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1443 return rcStrict;
1444 }
1445
1446 /* Copy the parameter words. */
1447 for (int i = 0; i < cbWords; ++i)
1448 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1449
1450 /* Unmap the old stack. */
1451 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1452 if (rcStrict != VINF_SUCCESS)
1453 {
1454 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1455 return rcStrict;
1456 }
1457
1458 /* Push the old SS:rSP. */
1459 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1460 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1461 }
1462 }
1463 else
1464 {
1465 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1466
1467 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1468 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1469 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1470 uPtrRet.pu64[2] = uOldRsp;
1471 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1472 }
1473
1474 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Chop the high bits off if 16-bit gate (Intel says so). */
1482 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1483 uNewRip = (uint16_t)uNewRip;
1484
1485 /* Limit / canonical check. */
1486 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1487 if (!IEM_IS_LONG_MODE(pIemCpu))
1488 {
1489 if (uNewRip > cbLimit)
1490 {
1491 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1492 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1493 }
1494 u64Base = X86DESC_BASE(&DescCS.Legacy);
1495 }
1496 else
1497 {
1498 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1499 if (!IEM_IS_CANONICAL(uNewRip))
1500 {
1501 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1502 return iemRaiseNotCanonical(pIemCpu);
1503 }
1504 u64Base = 0;
1505 }
1506
1507 /*
1508 * Now set the accessed bit before committing the result into CS, CSHID
1509 * and RIP. (The return address has already been written to the new
1510 * stack and committed above.)
1511 */
1512 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1513 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1514 {
1515 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1516 if (rcStrict != VINF_SUCCESS)
1517 return rcStrict;
1518 /** @todo check what VT-x and AMD-V does. */
1519 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1520 }
1521
1522 /* Commit new CS:rIP. */
1523 pCtx->rip = uNewRip;
1524 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1525 pCtx->cs.Sel |= pIemCpu->uCpl;
1526 pCtx->cs.ValidSel = pCtx->cs.Sel;
1527 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1528 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1529 pCtx->cs.u32Limit = cbLimit;
1530 pCtx->cs.u64Base = u64Base;
1531 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1532 }
1533 else
1534 {
1535 /* Same privilege. */
1536 /** @todo: This is very similar to regular far calls; merge! */
1537
1538 /* Check stack first - may #SS(0). */
1539 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1540 * 16-bit code cause a two or four byte CS to be pushed? */
1541 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1542 IEM_IS_LONG_MODE(pIemCpu) ? 8+8
1543 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1544 &uPtrRet.pv, &uNewRsp);
1545 if (rcStrict != VINF_SUCCESS)
1546 return rcStrict;
1547
1548 /* Chop the high bits off if 16-bit gate (Intel says so). */
1549 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1550 uNewRip = (uint16_t)uNewRip;
1551
1552 /* Limit / canonical check. */
1553 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1554 if (!IEM_IS_LONG_MODE(pIemCpu))
1555 {
1556 if (uNewRip > cbLimit)
1557 {
1558 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1559 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1560 }
1561 u64Base = X86DESC_BASE(&DescCS.Legacy);
1562 }
1563 else
1564 {
1565 if (!IEM_IS_CANONICAL(uNewRip))
1566 {
1567 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1568 return iemRaiseNotCanonical(pIemCpu);
1569 }
1570 u64Base = 0;
1571 }
1572
1573 /*
1574 * Now set the accessed bit before
1575 * writing the return address to the stack and committing the result into
1576 * CS, CSHID and RIP.
1577 */
1578 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1579 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1580 {
1581 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1582 if (rcStrict != VINF_SUCCESS)
1583 return rcStrict;
1584 /** @todo check what VT-x and AMD-V does. */
1585 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1586 }
1587
1588 /* stack */
1589 if (!IEM_IS_LONG_MODE(pIemCpu))
1590 {
1591 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1592 {
1593 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1594 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1595 }
1596 else
1597 {
1598 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1599 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1600 uPtrRet.pu16[1] = pCtx->cs.Sel;
1601 }
1602 }
1603 else
1604 {
1605 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1606 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1607 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1608 }
1609
1610 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1611 if (rcStrict != VINF_SUCCESS)
1612 return rcStrict;
1613
1614 /* commit */
1615 pCtx->rip = uNewRip;
1616 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1617 pCtx->cs.Sel |= pIemCpu->uCpl;
1618 pCtx->cs.ValidSel = pCtx->cs.Sel;
1619 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1620 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1621 pCtx->cs.u32Limit = cbLimit;
1622 pCtx->cs.u64Base = u64Base;
1623 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1624 }
1625 }
1626 pCtx->eflags.Bits.u1RF = 0;
1627 return VINF_SUCCESS;
1628#endif
1629}
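
/*
 * Where the inner SS:rSP is fetched from in the more-privileged call path
 * above, condensed into a hypothetical helper (illustrative only): the TSS
 * holds one stack pointer (plus an SS selector outside long mode) per target
 * DPL 0..2.
 */
#if 0
static uint32_t ExampleInnerStackTssOffset(bool fLongMode, bool f386Tss, uint8_t uDpl)
{
    if (fLongMode)
        return RT_OFFSETOF(X86TSS64, rsp0) + uDpl * sizeof(uint64_t); /* rsp0..rsp2, SS is a NULL selector */
    if (f386Tss)
        return RT_OFFSETOF(X86TSS32, esp0) + uDpl * 8;                /* esp0+ss0, esp1+ss1, esp2+ss2      */
    return RT_OFFSETOF(X86TSS16, sp0)  + uDpl * 4;                    /* sp0+ss0,  sp1+ss1,  sp2+ss2       */
}
#endif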
1630
1631
1632/**
1633 * Implements far jumps and calls thru system selectors.
1634 *
1635 * @param uSel The selector.
1636 * @param enmBranch The kind of branching we're performing.
1637 * @param enmEffOpSize The effective operand size.
1638 * @param pDesc The descriptor corresponding to @a uSel.
1639 */
1640IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1641{
1642 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1643 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1644
1645 if (IEM_IS_LONG_MODE(pIemCpu))
1646 switch (pDesc->Legacy.Gen.u4Type)
1647 {
1648 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1649 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1650
1651 default:
1652 case AMD64_SEL_TYPE_SYS_LDT:
1653 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1654 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1656 case AMD64_SEL_TYPE_SYS_INT_GATE:
1657 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1658 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1659 }
1660
1661 switch (pDesc->Legacy.Gen.u4Type)
1662 {
1663 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1664 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1665 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1666
1667 case X86_SEL_TYPE_SYS_TASK_GATE:
1668 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1669
1670 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1671 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1672 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1673
1674 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1675 Log(("branch %04x -> busy 286 TSS\n", uSel));
1676 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1677
1678 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1679 Log(("branch %04x -> busy 386 TSS\n", uSel));
1680 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1681
1682 default:
1683 case X86_SEL_TYPE_SYS_LDT:
1684 case X86_SEL_TYPE_SYS_286_INT_GATE:
1685 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1686 case X86_SEL_TYPE_SYS_386_INT_GATE:
1687 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1688 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1689 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1690 }
1691}
1692
1693
1694/**
1695 * Implements far jumps.
1696 *
1697 * @param uSel The selector.
1698 * @param offSeg The segment offset.
1699 * @param enmEffOpSize The effective operand size.
1700 */
1701IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1702{
1703 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1704 NOREF(cbInstr);
1705 Assert(offSeg <= UINT32_MAX);
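    /* Illustrative guest forms that end up here (the decoder has already
     * fetched the selector and offset for us):
     *      JMP ptr16:16 / ptr16:32     - direct far jump
     *      JMP m16:16  / m16:32        - indirect far jump thru memory
     */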
1706
1707 /*
1708 * Real mode and V8086 mode are easy. The only snag seems to be that
1709 * CS.limit doesn't change and the limit check is done against the current
1710 * limit.
1711 */
1712 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1713 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1714 {
1715 if (offSeg > pCtx->cs.u32Limit)
1716 return iemRaiseGeneralProtectionFault0(pIemCpu);
1717
1718 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1719 pCtx->rip = offSeg;
1720 else
1721 pCtx->rip = offSeg & UINT16_MAX;
1722 pCtx->cs.Sel = uSel;
1723 pCtx->cs.ValidSel = uSel;
1724 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1725 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1726 pCtx->eflags.Bits.u1RF = 0;
1727 return VINF_SUCCESS;
1728 }
1729
1730 /*
1731 * Protected mode. Need to parse the specified descriptor...
1732 */
1733 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1734 {
1735 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1736 return iemRaiseGeneralProtectionFault0(pIemCpu);
1737 }
1738
1739 /* Fetch the descriptor. */
1740 IEMSELDESC Desc;
1741 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1742 if (rcStrict != VINF_SUCCESS)
1743 return rcStrict;
1744
1745 /* Is it there? */
1746 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1747 {
1748 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1749 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1750 }
1751
1752 /*
1753 * Deal with it according to its type. We do the standard code selectors
1754 * here and dispatch the system selectors to worker functions.
1755 */
1756 if (!Desc.Legacy.Gen.u1DescType)
1757 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1758
1759 /* Only code segments. */
1760 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1761 {
1762 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1763 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1764 }
1765
1766 /* L vs D. */
1767 if ( Desc.Legacy.Gen.u1Long
1768 && Desc.Legacy.Gen.u1DefBig
1769 && IEM_IS_LONG_MODE(pIemCpu))
1770 {
1771 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1772 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1773 }
1774
1775 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1776 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1777 {
1778 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1779 {
1780 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1781 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1782 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1783 }
1784 }
1785 else
1786 {
1787 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1788 {
1789 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1790 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1791 }
1792 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1793 {
1794 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1795 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1796 }
1797 }
1798
1799 /* Chop the high bits if 16-bit (Intel says so). */
1800 if (enmEffOpSize == IEMMODE_16BIT)
1801 offSeg &= UINT16_MAX;
1802
1803 /* Limit check. (Should alternatively check for non-canonical addresses
1804 here, but that is ruled out by offSeg being 32-bit, right?) */
1805 uint64_t u64Base;
1806 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1807 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1808 u64Base = 0;
1809 else
1810 {
1811 if (offSeg > cbLimit)
1812 {
1813 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1814 /** @todo: Intel says this is #GP(0)! */
1815 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1816 }
1817 u64Base = X86DESC_BASE(&Desc.Legacy);
1818 }
1819
1820 /*
1821 * Ok, everything checked out fine. Now set the accessed bit before
1822 * committing the result into CS, CSHID and RIP.
1823 */
1824 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1825 {
1826 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1827 if (rcStrict != VINF_SUCCESS)
1828 return rcStrict;
1829 /** @todo check what VT-x and AMD-V does. */
1830 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1831 }
1832
1833 /* commit */
1834 pCtx->rip = offSeg;
1835 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1836 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1837 pCtx->cs.ValidSel = pCtx->cs.Sel;
1838 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1839 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1840 pCtx->cs.u32Limit = cbLimit;
1841 pCtx->cs.u64Base = u64Base;
1842 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1843 pCtx->eflags.Bits.u1RF = 0;
1844 /** @todo check if the hidden bits are loaded correctly for 64-bit
1845 * mode. */
1846 return VINF_SUCCESS;
1847}
1848
1849
1850/**
1851 * Implements far calls.
1852 *
1853 * This is very similar to iemCImpl_FarJmp.
1854 *
1855 * @param uSel The selector.
1856 * @param offSeg The segment offset.
1857 * @param enmEffOpSize The operand size (in case we need it).
1858 */
1859IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1860{
1861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1862 VBOXSTRICTRC rcStrict;
1863 uint64_t uNewRsp;
1864 RTPTRUNION uPtrRet;
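    /* A rough sketch of the far-return frame pushed in the same-privilege
     * cases below, from the new stack pointer upwards:
     *      o16:  IP,  CS       (2+2 bytes)
     *      o32:  EIP, CS       (4+4 bytes, high half of the CS slot: see the
     *                           todo further down)
     *      o64:  RIP, CS       (8+8 bytes)
     * Calls thru call gates and other system selectors take the
     * iemCImpl_BranchSysSel path instead. */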
1865
1866 /*
1867 * Real mode and V8086 mode are easy. The only snag seems to be that
1868 * CS.limit doesn't change and the limit check is done against the current
1869 * limit.
1870 */
1871 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1872 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1873 {
1874 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1875
1876 /* Check stack first - may #SS(0). */
1877 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1878 &uPtrRet.pv, &uNewRsp);
1879 if (rcStrict != VINF_SUCCESS)
1880 return rcStrict;
1881
1882 /* Check the target address range. */
1883 if (offSeg > UINT32_MAX)
1884 return iemRaiseGeneralProtectionFault0(pIemCpu);
1885
1886 /* Everything is fine, push the return address. */
1887 if (enmEffOpSize == IEMMODE_16BIT)
1888 {
1889 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1890 uPtrRet.pu16[1] = pCtx->cs.Sel;
1891 }
1892 else
1893 {
1894 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1895 uPtrRet.pu16[3] = pCtx->cs.Sel;
1896 }
1897 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1898 if (rcStrict != VINF_SUCCESS)
1899 return rcStrict;
1900
1901 /* Branch. */
1902 pCtx->rip = offSeg;
1903 pCtx->cs.Sel = uSel;
1904 pCtx->cs.ValidSel = uSel;
1905 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1906 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1907 pCtx->eflags.Bits.u1RF = 0;
1908 return VINF_SUCCESS;
1909 }
1910
1911 /*
1912 * Protected mode. Need to parse the specified descriptor...
1913 */
1914 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1915 {
1916 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1917 return iemRaiseGeneralProtectionFault0(pIemCpu);
1918 }
1919
1920 /* Fetch the descriptor. */
1921 IEMSELDESC Desc;
1922 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925
1926 /*
1927 * Deal with it according to its type. We do the standard code selectors
1928 * here and dispatch the system selectors to worker functions.
1929 */
1930 if (!Desc.Legacy.Gen.u1DescType)
1931 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1932
1933 /* Only code segments. */
1934 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1935 {
1936 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1937 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1938 }
1939
1940 /* L vs D. */
1941 if ( Desc.Legacy.Gen.u1Long
1942 && Desc.Legacy.Gen.u1DefBig
1943 && IEM_IS_LONG_MODE(pIemCpu))
1944 {
1945 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1946 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1947 }
1948
1949 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1950 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1951 {
1952 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1953 {
1954 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1955 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1956 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1957 }
1958 }
1959 else
1960 {
1961 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1962 {
1963 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1965 }
1966 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1967 {
1968 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1969 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1970 }
1971 }
1972
1973 /* Is it there? */
1974 if (!Desc.Legacy.Gen.u1Present)
1975 {
1976 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1977 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1978 }
1979
1980 /* Check stack first - may #SS(0). */
1981 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1982 * 16-bit code cause a two or four byte CS to be pushed? */
1983 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1984 enmEffOpSize == IEMMODE_64BIT ? 8+8
1985 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1986 &uPtrRet.pv, &uNewRsp);
1987 if (rcStrict != VINF_SUCCESS)
1988 return rcStrict;
1989
1990 /* Chop the high bits if 16-bit (Intel says so). */
1991 if (enmEffOpSize == IEMMODE_16BIT)
1992 offSeg &= UINT16_MAX;
1993
1994 /* Limit / canonical check. */
1995 uint64_t u64Base;
1996 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1997 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1998 {
1999 if (!IEM_IS_CANONICAL(offSeg))
2000 {
2001 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2002 return iemRaiseNotCanonical(pIemCpu);
2003 }
2004 u64Base = 0;
2005 }
2006 else
2007 {
2008 if (offSeg > cbLimit)
2009 {
2010 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2011 /** @todo: Intel says this is #GP(0)! */
2012 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2013 }
2014 u64Base = X86DESC_BASE(&Desc.Legacy);
2015 }
2016
2017 /*
2018 * Now set the accessed bit before
2019 * writing the return address to the stack and committing the result into
2020 * CS, CSHID and RIP.
2021 */
2022 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2023 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2024 {
2025 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2026 if (rcStrict != VINF_SUCCESS)
2027 return rcStrict;
2028 /** @todo check what VT-x and AMD-V does. */
2029 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2030 }
2031
2032 /* stack */
2033 if (enmEffOpSize == IEMMODE_16BIT)
2034 {
2035 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2036 uPtrRet.pu16[1] = pCtx->cs.Sel;
2037 }
2038 else if (enmEffOpSize == IEMMODE_32BIT)
2039 {
2040 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2041 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2042 }
2043 else
2044 {
2045 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2046 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2047 }
2048 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
2049 if (rcStrict != VINF_SUCCESS)
2050 return rcStrict;
2051
2052 /* commit */
2053 pCtx->rip = offSeg;
2054 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2055 pCtx->cs.Sel |= pIemCpu->uCpl;
2056 pCtx->cs.ValidSel = pCtx->cs.Sel;
2057 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2058 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2059 pCtx->cs.u32Limit = cbLimit;
2060 pCtx->cs.u64Base = u64Base;
2061 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2062 pCtx->eflags.Bits.u1RF = 0;
2063 /** @todo check if the hidden bits are loaded correctly for 64-bit
2064 * mode. */
2065 return VINF_SUCCESS;
2066}
2067
2068
2069/**
2070 * Implements retf.
2071 *
2072 * @param enmEffOpSize The effective operand size.
2073 * @param cbPop The number of bytes of arguments to pop from
2074 * the stack.
2075 */
2076IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2077{
2078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2079 VBOXSTRICTRC rcStrict;
2080 RTCPTRUNION uPtrFrame;
2081 uint64_t uNewRsp;
2082 uint64_t uNewRip;
2083 uint16_t uNewCs;
2084 NOREF(cbInstr);
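    /* Illustrative guest forms:
     *      retf            - cbPop = 0
     *      retf 10h        - cbPop = 16, releases 16 bytes of caller arguments
     * Stack layout consumed, lowest address first (o32 shown as a sketch):
     *      EIP, CS, <cbPop parameter bytes>, ESP, SS
     * where ESP and SS are only read when returning to an outer privilege
     * level. */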
2085
2086 /*
2087 * Read the stack values first.
2088 */
2089 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2090 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2091 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2092 if (rcStrict != VINF_SUCCESS)
2093 return rcStrict;
2094 if (enmEffOpSize == IEMMODE_16BIT)
2095 {
2096 uNewRip = uPtrFrame.pu16[0];
2097 uNewCs = uPtrFrame.pu16[1];
2098 }
2099 else if (enmEffOpSize == IEMMODE_32BIT)
2100 {
2101 uNewRip = uPtrFrame.pu32[0];
2102 uNewCs = uPtrFrame.pu16[2];
2103 }
2104 else
2105 {
2106 uNewRip = uPtrFrame.pu64[0];
2107 uNewCs = uPtrFrame.pu16[4];
2108 }
2109
2110 /*
2111 * Real mode and V8086 mode are easy.
2112 */
2113 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2114 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2115 {
2116 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2117 /** @todo check how this is supposed to work if sp=0xfffe. */
2118
2119 /* Check the limit of the new EIP. */
2120 /** @todo Intel pseudo code only does the limit check for 16-bit
2121 * operands, while AMD makes no distinction. What is right? */
2122 if (uNewRip > pCtx->cs.u32Limit)
2123 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2124
2125 /* commit the operation. */
2126 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2127 if (rcStrict != VINF_SUCCESS)
2128 return rcStrict;
2129 pCtx->rip = uNewRip;
2130 pCtx->cs.Sel = uNewCs;
2131 pCtx->cs.ValidSel = uNewCs;
2132 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2133 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2134 pCtx->eflags.Bits.u1RF = 0;
2135 /** @todo do we load attribs and limit as well? */
2136 if (cbPop)
2137 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2138 return VINF_SUCCESS;
2139 }
2140
2141 /*
2142 * Protected mode is complicated, of course.
2143 */
2144 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2145 {
2146 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2147 return iemRaiseGeneralProtectionFault0(pIemCpu);
2148 }
2149
2150 /* Fetch the descriptor. */
2151 IEMSELDESC DescCs;
2152 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs, X86_XCPT_GP);
2153 if (rcStrict != VINF_SUCCESS)
2154 return rcStrict;
2155
2156 /* Can only return to a code selector. */
2157 if ( !DescCs.Legacy.Gen.u1DescType
2158 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2159 {
2160 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2161 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2162 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2163 }
2164
2165 /* L vs D. */
2166 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2167 && DescCs.Legacy.Gen.u1DefBig
2168 && IEM_IS_LONG_MODE(pIemCpu))
2169 {
2170 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2171 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2172 }
2173
2174 /* DPL/RPL/CPL checks. */
2175 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2176 {
2177 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
2178 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2179 }
2180
2181 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2182 {
2183 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2184 {
2185 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2186 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2187 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2188 }
2189 }
2190 else
2191 {
2192 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2193 {
2194 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2195 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2196 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2197 }
2198 }
2199
2200 /* Is it there? */
2201 if (!DescCs.Legacy.Gen.u1Present)
2202 {
2203 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2204 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2205 }
2206
2207 /*
2208 * Return to outer privilege? (We'll typically have entered via a call gate.)
2209 */
2210 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2211 {
2212 /* Read the outer stack pointer stored *after* the parameters. */
2213 RTCPTRUNION uPtrStack;
2214 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
2215 if (rcStrict != VINF_SUCCESS)
2216 return rcStrict;
2217
2218 uPtrStack.pu8 += cbPop; /* Skip the parameters. */
2219
2220 uint16_t uNewOuterSs;
2221 uint64_t uNewOuterRsp;
2222 if (enmEffOpSize == IEMMODE_16BIT)
2223 {
2224 uNewOuterRsp = uPtrStack.pu16[0];
2225 uNewOuterSs = uPtrStack.pu16[1];
2226 }
2227 else if (enmEffOpSize == IEMMODE_32BIT)
2228 {
2229 uNewOuterRsp = uPtrStack.pu32[0];
2230 uNewOuterSs = uPtrStack.pu16[2];
2231 }
2232 else
2233 {
2234 uNewOuterRsp = uPtrStack.pu64[0];
2235 uNewOuterSs = uPtrStack.pu16[4];
2236 }
2237
2238 /* Check for a NULL stack selector (only allowed when returning to 64-bit
2239 code at a CPL below 3), otherwise read the SS descriptor. */
2240 IEMSELDESC DescSs;
2241 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2242 {
2243 if ( !DescCs.Legacy.Gen.u1Long
2244 || (uNewOuterSs & X86_SEL_RPL) == 3)
2245 {
2246 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2247 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2248 return iemRaiseGeneralProtectionFault0(pIemCpu);
2249 }
2250 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2251 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2252 }
2253 else
2254 {
2255 /* Fetch the descriptor for the new stack segment. */
2256 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2257 if (rcStrict != VINF_SUCCESS)
2258 return rcStrict;
2259 }
2260
2261 /* Check that RPL of stack and code selectors match. */
2262 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2263 {
2264 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2265 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2266 }
2267
2268 /* Must be a writable data segment. */
2269 if ( !DescSs.Legacy.Gen.u1DescType
2270 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2271 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2272 {
2273 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2274 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2275 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2276 }
2277
2278 /* L vs D. (Not mentioned by intel.) */
2279 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2280 && DescSs.Legacy.Gen.u1DefBig
2281 && IEM_IS_LONG_MODE(pIemCpu))
2282 {
2283 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2284 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2285 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2286 }
2287
2288 /* DPL/RPL/CPL checks. */
2289 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2290 {
2291 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2292 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2293 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2294 }
2295
2296 /* Is it there? */
2297 if (!DescSs.Legacy.Gen.u1Present)
2298 {
2299 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2300 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
2301 }
2302
2303 /* Calc SS limit. */
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2305
2306 /* Is RIP canonical or within CS.limit? */
2307 uint64_t u64Base;
2308 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2309
2310 /** @todo Testcase: Is this correct? */
2311 if ( DescCs.Legacy.Gen.u1Long
2312 && IEM_IS_LONG_MODE(pIemCpu) )
2313 {
2314 if (!IEM_IS_CANONICAL(uNewRip))
2315 {
2316 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2317 return iemRaiseNotCanonical(pIemCpu);
2318 }
2319 u64Base = 0;
2320 }
2321 else
2322 {
2323 if (uNewRip > cbLimitCs)
2324 {
2325 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2326 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2327 /** @todo: Intel says this is #GP(0)! */
2328 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2329 }
2330 u64Base = X86DESC_BASE(&DescCs.Legacy);
2331 }
2332
2333 /*
2334 * Now set the accessed bits in the CS and SS descriptors (if needed)
2335 * before committing the result into CS, CSHID, SS, SSHID, RSP and RIP.
2336 * (Nothing is pushed here; this is a return.)
2337 */
2338 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2339 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2340 {
2341 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2342 if (rcStrict != VINF_SUCCESS)
2343 return rcStrict;
2344 /** @todo check what VT-x and AMD-V does. */
2345 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2346 }
2347 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2348 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2349 {
2350 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
2351 if (rcStrict != VINF_SUCCESS)
2352 return rcStrict;
2353 /** @todo check what VT-x and AMD-V does. */
2354 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2355 }
2356
2357 /* commit */
2358 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2359 if (rcStrict != VINF_SUCCESS)
2360 return rcStrict;
2361 if (enmEffOpSize == IEMMODE_16BIT)
2362 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2363 else
2364 pCtx->rip = uNewRip;
2365 pCtx->cs.Sel = uNewCs;
2366 pCtx->cs.ValidSel = uNewCs;
2367 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2368 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2369 pCtx->cs.u32Limit = cbLimitCs;
2370 pCtx->cs.u64Base = u64Base;
2371 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2372 pCtx->rsp = uNewOuterRsp;
2373 pCtx->ss.Sel = uNewOuterSs;
2374 pCtx->ss.ValidSel = uNewOuterSs;
2375 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2376 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2377 pCtx->ss.u32Limit = cbLimitSs;
2378 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2379 pCtx->ss.u64Base = 0;
2380 else
2381 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2382
2383 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
2384 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2385 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2386 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2387 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2388
2389 /** @todo check if the hidden bits are loaded correctly for 64-bit
2390 * mode. */
2391
2392 if (cbPop)
2393 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2394 pCtx->eflags.Bits.u1RF = 0;
2395
2396 /* Done! */
2397 }
2398 /*
2399 * Return to the same privilege level.
2400 */
2401 else
2402 {
2403 /* Limit / canonical check. */
2404 uint64_t u64Base;
2405 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2406
2407 /** @todo Testcase: Is this correct? */
2408 if ( DescCs.Legacy.Gen.u1Long
2409 && IEM_IS_LONG_MODE(pIemCpu) )
2410 {
2411 if (!IEM_IS_CANONICAL(uNewRip))
2412 {
2413 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2414 return iemRaiseNotCanonical(pIemCpu);
2415 }
2416 u64Base = 0;
2417 }
2418 else
2419 {
2420 if (uNewRip > cbLimitCs)
2421 {
2422 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2423 /** @todo: Intel says this is #GP(0)! */
2424 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2425 }
2426 u64Base = X86DESC_BASE(&DescCs.Legacy);
2427 }
2428
2429 /*
2430 * Now set the accessed bit in the CS descriptor (if needed) before
2431 * committing the result into CS, CSHID and RIP. (Nothing is pushed
2432 * here; this is a return.)
2433 */
2434 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2435 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2436 {
2437 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2438 if (rcStrict != VINF_SUCCESS)
2439 return rcStrict;
2440 /** @todo check what VT-x and AMD-V does. */
2441 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2442 }
2443
2444 /* commit */
2445 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2446 if (rcStrict != VINF_SUCCESS)
2447 return rcStrict;
2448 if (enmEffOpSize == IEMMODE_16BIT)
2449 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2450 else
2451 pCtx->rip = uNewRip;
2452 pCtx->cs.Sel = uNewCs;
2453 pCtx->cs.ValidSel = uNewCs;
2454 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2455 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2456 pCtx->cs.u32Limit = cbLimitCs;
2457 pCtx->cs.u64Base = u64Base;
2458 /** @todo check if the hidden bits are loaded correctly for 64-bit
2459 * mode. */
2460 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2461 if (cbPop)
2462 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2463 pCtx->eflags.Bits.u1RF = 0;
2464 }
2465 return VINF_SUCCESS;
2466}
2467
2468
2469/**
2470 * Implements retn.
2471 *
2472 * We're doing this in C because of the \#GP that might be raised if the popped
2473 * program counter is out of bounds.
2474 *
2475 * @param enmEffOpSize The effective operand size.
2476 * @param cbPop The number of bytes of arguments to pop from
2477 * the stack.
2478 */
2479IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2480{
2481 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2482 NOREF(cbInstr);
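    /* Illustrative guest forms:
     *      ret             - cbPop = 0
     *      ret 8           - cbPop = 8, releases 8 bytes of arguments after
     *                        the return address has been popped
     */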
2483
2484 /* Fetch the RSP from the stack. */
2485 VBOXSTRICTRC rcStrict;
2486 RTUINT64U NewRip;
2487 RTUINT64U NewRsp;
2488 NewRsp.u = pCtx->rsp;
2489 switch (enmEffOpSize)
2490 {
2491 case IEMMODE_16BIT:
2492 NewRip.u = 0;
2493 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
2494 break;
2495 case IEMMODE_32BIT:
2496 NewRip.u = 0;
2497 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
2498 break;
2499 case IEMMODE_64BIT:
2500 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
2501 break;
2502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2503 }
2504 if (rcStrict != VINF_SUCCESS)
2505 return rcStrict;
2506
2507 /* Check the new RSP before loading it. */
2508 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2509 * of it. The canonical test is performed here and for call. */
2510 if (enmEffOpSize != IEMMODE_64BIT)
2511 {
2512 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2513 {
2514 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2515 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2516 }
2517 }
2518 else
2519 {
2520 if (!IEM_IS_CANONICAL(NewRip.u))
2521 {
2522 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2523 return iemRaiseNotCanonical(pIemCpu);
2524 }
2525 }
2526
2527 /* Apply cbPop */
2528 if (cbPop)
2529 iemRegAddToRspEx(pIemCpu, pCtx, &NewRsp, cbPop);
2530
2531 /* Commit it. */
2532 pCtx->rip = NewRip.u;
2533 pCtx->rsp = NewRsp.u;
2534 pCtx->eflags.Bits.u1RF = 0;
2535
2536 return VINF_SUCCESS;
2537}
2538
2539
2540/**
2541 * Implements enter.
2542 *
2543 * We're doing this in C because the instruction is insane, even for the
2544 * u8NestingLevel=0 case, dealing with the stack is tedious.
2545 *
2546 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate (in bytes).
 * @param cParameters The nesting level (only the low five bits are used).
2547 */
2548IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2549{
2550 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
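    /* For the common cParameters == 0 case this is roughly equivalent to
     *      push  xBP
     *      mov   xBP, xSP
     *      sub   xSP, cbFrame
     * using the stack width implied by SS and the operand size; a non-zero
     * nesting level additionally copies frame pointers from the old frame
     * and pushes the new xBP, as done below. */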
2551
2552 /* Push RBP, saving the old value in TmpRbp. */
2553 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2554 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2555 RTUINT64U NewRbp;
2556 VBOXSTRICTRC rcStrict;
2557 if (enmEffOpSize == IEMMODE_64BIT)
2558 {
2559 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
2560 NewRbp = NewRsp;
2561 }
2562 else if (enmEffOpSize == IEMMODE_32BIT)
2563 {
2564 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
2565 NewRbp = NewRsp;
2566 }
2567 else
2568 {
2569 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
2570 NewRbp = TmpRbp;
2571 NewRbp.Words.w0 = NewRsp.Words.w0;
2572 }
2573 if (rcStrict != VINF_SUCCESS)
2574 return rcStrict;
2575
2576 /* Copy the parameters (which Intel calls nesting levels). */
2577 cParameters &= 0x1f;
2578 if (cParameters > 0)
2579 {
2580 switch (enmEffOpSize)
2581 {
2582 case IEMMODE_16BIT:
2583 if (pCtx->ss.Attr.n.u1DefBig)
2584 TmpRbp.DWords.dw0 -= 2;
2585 else
2586 TmpRbp.Words.w0 -= 2;
2587 do
2588 {
2589 uint16_t u16Tmp;
2590 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
2591 if (rcStrict != VINF_SUCCESS)
2592 break;
2593 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
2594 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2595 break;
2596
2597 case IEMMODE_32BIT:
2598 if (pCtx->ss.Attr.n.u1DefBig)
2599 TmpRbp.DWords.dw0 -= 4;
2600 else
2601 TmpRbp.Words.w0 -= 4;
2602 do
2603 {
2604 uint32_t u32Tmp;
2605 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
2606 if (rcStrict != VINF_SUCCESS)
2607 break;
2608 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
2609 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2610 break;
2611
2612 case IEMMODE_64BIT:
2613 TmpRbp.u -= 8;
2614 do
2615 {
2616 uint64_t u64Tmp;
2617 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
2618 if (rcStrict != VINF_SUCCESS)
2619 break;
2620 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
2621 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2622 break;
2623
2624 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2625 }
2626 if (rcStrict != VINF_SUCCESS)
2627 return rcStrict;
2628
2629 /* Push the new RBP */
2630 if (enmEffOpSize == IEMMODE_64BIT)
2631 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
2632 else if (enmEffOpSize == IEMMODE_32BIT)
2633 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
2634 else
2635 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
2636 if (rcStrict != VINF_SUCCESS)
2637 return rcStrict;
2638
2639 }
2640
2641 /* Recalc RSP. */
2642 iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame);
2643
2644 /** @todo Should probe write access at the new RSP according to AMD. */
2645
2646 /* Commit it. */
2647 pCtx->rbp = NewRbp.u;
2648 pCtx->rsp = NewRsp.u;
2649 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2650
2651 return VINF_SUCCESS;
2652}
2653
2654
2655
2656/**
2657 * Implements leave.
2658 *
2659 * We're doing this in C because messing with the stack registers is annoying
2660 * since they depend on SS attributes.
2661 *
2662 * @param enmEffOpSize The effective operand size.
2663 */
2664IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2665{
2666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
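    /* Roughly the inverse of ENTER, i.e. equivalent to
     *      mov  xSP, xBP
     *      pop  xBP
     * with the stack width taken from SS (or 64-bit in long mode), which is
     * what the two steps below implement. */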
2667
2668 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2669 RTUINT64U NewRsp;
2670 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2671 NewRsp.u = pCtx->rbp;
2672 else if (pCtx->ss.Attr.n.u1DefBig)
2673 NewRsp.u = pCtx->ebp;
2674 else
2675 {
2676 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2677 NewRsp.u = pCtx->rsp;
2678 NewRsp.Words.w0 = pCtx->bp;
2679 }
2680
2681 /* Pop RBP according to the operand size. */
2682 VBOXSTRICTRC rcStrict;
2683 RTUINT64U NewRbp;
2684 switch (enmEffOpSize)
2685 {
2686 case IEMMODE_16BIT:
2687 NewRbp.u = pCtx->rbp;
2688 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
2689 break;
2690 case IEMMODE_32BIT:
2691 NewRbp.u = 0;
2692 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
2693 break;
2694 case IEMMODE_64BIT:
2695 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
2696 break;
2697 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2698 }
2699 if (rcStrict != VINF_SUCCESS)
2700 return rcStrict;
2701
2702
2703 /* Commit it. */
2704 pCtx->rbp = NewRbp.u;
2705 pCtx->rsp = NewRsp.u;
2706 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2707
2708 return VINF_SUCCESS;
2709}
2710
2711
2712/**
2713 * Implements int3 and int XX.
2714 *
2715 * @param u8Int The interrupt vector number.
2716 * @param fIsBpInstr Whether it is the one-byte breakpoint instruction (int3).
2717 */
2718IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
2719{
2720 Assert(pIemCpu->cXcptRecursions == 0);
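    /* Illustrative encodings that land here:
     *      int3        (0xCC)     - fIsBpInstr = true
     *      int 21h     (0xCD 21)  - fIsBpInstr = false
     * The flag lets iemRaiseXcptOrInt tell the dedicated breakpoint encoding
     * apart from a generic software interrupt. */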
2721 return iemRaiseXcptOrInt(pIemCpu,
2722 cbInstr,
2723 u8Int,
2724 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
2725 0,
2726 0);
2727}
2728
2729
2730/**
2731 * Implements iret for real mode and V8086 mode.
2732 *
2733 * @param enmEffOpSize The effective operand size.
2734 */
2735IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2736{
2737 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2738 X86EFLAGS Efl;
2739 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2740 NOREF(cbInstr);
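    /* The real-mode / V8086 IRET frame read below, lowest address first:
     *      o16:  IP,  CS, FLAGS    (3 words,  6 bytes)
     *      o32:  EIP, CS, EFLAGS   (3 dwords, 12 bytes)
     * Note that the o32 path rejects a new EIP above 64K before the usual
     * CS limit check. */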
2741
2742 /*
2743 * In V8086 mode with IOPL below 3, iret raises #GP(0) unless VME is enabled.
2744 */
2745 if ( Efl.Bits.u1VM
2746 && Efl.Bits.u2IOPL != 3
2747 && !(pCtx->cr4 & X86_CR4_VME))
2748 return iemRaiseGeneralProtectionFault0(pIemCpu);
2749
2750 /*
2751 * Do the stack bits, but don't commit RSP before everything checks
2752 * out right.
2753 */
2754 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2755 VBOXSTRICTRC rcStrict;
2756 RTCPTRUNION uFrame;
2757 uint16_t uNewCs;
2758 uint32_t uNewEip;
2759 uint32_t uNewFlags;
2760 uint64_t uNewRsp;
2761 if (enmEffOpSize == IEMMODE_32BIT)
2762 {
2763 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2764 if (rcStrict != VINF_SUCCESS)
2765 return rcStrict;
2766 uNewEip = uFrame.pu32[0];
2767 if (uNewEip > UINT16_MAX)
2768 return iemRaiseGeneralProtectionFault0(pIemCpu);
2769
2770 uNewCs = (uint16_t)uFrame.pu32[1];
2771 uNewFlags = uFrame.pu32[2];
2772 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2773 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2774 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2775 | X86_EFL_ID;
2776 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
2777 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2778 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2779 }
2780 else
2781 {
2782 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2783 if (rcStrict != VINF_SUCCESS)
2784 return rcStrict;
2785 uNewEip = uFrame.pu16[0];
2786 uNewCs = uFrame.pu16[1];
2787 uNewFlags = uFrame.pu16[2];
2788 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2789 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2790 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2791 /** @todo The intel pseudo code does not indicate what happens to
2792 * reserved flags. We just ignore them. */
2793 /* Ancient CPU adjustments: See iemCImpl_popf. */
2794 if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_286)
2795 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2796 }
2797 /** @todo Check how this is supposed to work if sp=0xfffe. */
2798 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2799 uNewCs, uNewEip, uNewFlags, uNewRsp));
2800
2801 /*
2802 * Check the limit of the new EIP.
2803 */
2804 /** @todo Only the AMD pseudo code checks the limit here, what's
2805 * right? */
2806 if (uNewEip > pCtx->cs.u32Limit)
2807 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2808
2809 /*
2810 * V8086 checks and flag adjustments
2811 */
2812 if (Efl.Bits.u1VM)
2813 {
2814 if (Efl.Bits.u2IOPL == 3)
2815 {
2816 /* Preserve IOPL and clear RF. */
2817 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2818 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2819 }
2820 else if ( enmEffOpSize == IEMMODE_16BIT
2821 && ( !(uNewFlags & X86_EFL_IF)
2822 || !Efl.Bits.u1VIP )
2823 && !(uNewFlags & X86_EFL_TF) )
2824 {
2825 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2826 uNewFlags &= ~X86_EFL_VIF;
2827 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2828 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2829 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2830 }
2831 else
2832 return iemRaiseGeneralProtectionFault0(pIemCpu);
2833 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2834 }
2835
2836 /*
2837 * Commit the operation.
2838 */
2839 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2840 if (rcStrict != VINF_SUCCESS)
2841 return rcStrict;
2842#ifdef DBGFTRACE_ENABLED
2843 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2844 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2845#endif
2846
2847 pCtx->rip = uNewEip;
2848 pCtx->cs.Sel = uNewCs;
2849 pCtx->cs.ValidSel = uNewCs;
2850 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2851 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2852 /** @todo do we load attribs and limit as well? */
2853 Assert(uNewFlags & X86_EFL_1);
2854 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2855
2856 return VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Loads a segment register when entering V8086 mode.
2862 *
2863 * @param pSReg The segment register.
2864 * @param uSeg The segment to load.
2865 */
2866static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2867{
2868 pSReg->Sel = uSeg;
2869 pSReg->ValidSel = uSeg;
2870 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2871 pSReg->u64Base = (uint32_t)uSeg << 4;
2872 pSReg->u32Limit = 0xffff;
2873 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2874 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2875 * IRET'ing to V8086. */
2876}
2877
2878
2879/**
2880 * Implements iret for protected mode returning to V8086 mode.
2881 *
2882 * @param pCtx Pointer to the CPU context.
2883 * @param uNewEip The new EIP.
2884 * @param uNewCs The new CS.
2885 * @param uNewFlags The new EFLAGS.
2886 * @param uNewRsp The RSP after the initial IRET frame.
2887 *
2888 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2889 */
2890IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2891 uint32_t, uNewFlags, uint64_t, uNewRsp)
2892{
2893 /*
2894 * Pop the V8086 specific frame bits off the stack.
2895 */
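    /* The caller has already consumed EIP, CS and EFLAGS; the remaining V8086
     * part of the frame read here is six dwords, lowest address first:
     *      ESP, SS, ES, DS, FS, GS     (24 bytes)
     * Only the low 16 bits of the segment slots are used. */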
2896 VBOXSTRICTRC rcStrict;
2897 RTCPTRUNION uFrame;
2898 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2899 if (rcStrict != VINF_SUCCESS)
2900 return rcStrict;
2901 uint32_t uNewEsp = uFrame.pu32[0];
2902 uint16_t uNewSs = uFrame.pu32[1];
2903 uint16_t uNewEs = uFrame.pu32[2];
2904 uint16_t uNewDs = uFrame.pu32[3];
2905 uint16_t uNewFs = uFrame.pu32[4];
2906 uint16_t uNewGs = uFrame.pu32[5];
2907 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2908 if (rcStrict != VINF_SUCCESS)
2909 return rcStrict;
2910
2911 /*
2912 * Commit the operation.
2913 */
2914 uNewFlags &= X86_EFL_LIVE_MASK;
2915 uNewFlags |= X86_EFL_RA1_MASK;
2916#ifdef DBGFTRACE_ENABLED
2917 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2918 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2919#endif
2920
2921 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2922 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2923 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2924 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2925 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2926 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2927 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2928 pCtx->rip = (uint16_t)uNewEip;
2929 pCtx->rsp = uNewEsp; /** @todo check this out! */
2930 pIemCpu->uCpl = 3;
2931
2932 return VINF_SUCCESS;
2933}
2934
2935
2936/**
2937 * Implements iret for protected mode returning via a nested task.
2938 *
2939 * @param enmEffOpSize The effective operand size.
2940 */
2941IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2942{
2943 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
2944#ifndef IEM_IMPLEMENTS_TASKSWITCH
2945 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2946#else
2947 /*
2948 * Read the segment selector in the link-field of the current TSS.
2949 */
2950 RTSEL uSelRet;
2951 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2952 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
2953 if (rcStrict != VINF_SUCCESS)
2954 return rcStrict;
2955
2956 /*
2957 * Fetch the returning task's TSS descriptor from the GDT.
2958 */
2959 if (uSelRet & X86_SEL_LDT)
2960 {
2961 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
2962 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet);
2963 }
2964
2965 IEMSELDESC TssDesc;
2966 rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelRet, X86_XCPT_GP);
2967 if (rcStrict != VINF_SUCCESS)
2968 return rcStrict;
2969
2970 if (TssDesc.Legacy.Gate.u1DescType)
2971 {
2972 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
2973 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2974 }
2975
2976 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
2977 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2978 {
2979 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
2980 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2981 }
2982
2983 if (!TssDesc.Legacy.Gate.u1Present)
2984 {
2985 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
2986 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2987 }
2988
2989 uint32_t uNextEip = pCtx->eip + cbInstr;
2990 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
2991 0 /* uCr2 */, uSelRet, &TssDesc);
2992#endif
2993}
2994
2995
2996/**
2997 * Implements iret for protected mode.
2998 *
2999 * @param enmEffOpSize The effective operand size.
3000 */
3001IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3002{
3003 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3004 NOREF(cbInstr);
3005 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3006
3007 /*
3008 * Nested task return.
3009 */
3010 if (pCtx->eflags.Bits.u1NT)
3011 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3012
3013 /*
3014 * Normal return.
3015 *
3016 * Do the stack bits, but don't commit RSP before everything checks
3017 * out right.
3018 */
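    /* Frame layout consumed here, lowest address first (o32 shown as a
     * sketch, the o16 variant uses words):
     *      EIP, CS, EFLAGS             - always popped below
     *      ESP, SS                     - only when returning to an outer
     *                                    privilege level
     */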
3019 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3020 VBOXSTRICTRC rcStrict;
3021 RTCPTRUNION uFrame;
3022 uint16_t uNewCs;
3023 uint32_t uNewEip;
3024 uint32_t uNewFlags;
3025 uint64_t uNewRsp;
3026 if (enmEffOpSize == IEMMODE_32BIT)
3027 {
3028 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
3029 if (rcStrict != VINF_SUCCESS)
3030 return rcStrict;
3031 uNewEip = uFrame.pu32[0];
3032 uNewCs = (uint16_t)uFrame.pu32[1];
3033 uNewFlags = uFrame.pu32[2];
3034 }
3035 else
3036 {
3037 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
3038 if (rcStrict != VINF_SUCCESS)
3039 return rcStrict;
3040 uNewEip = uFrame.pu16[0];
3041 uNewCs = uFrame.pu16[1];
3042 uNewFlags = uFrame.pu16[2];
3043 }
3044 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3045 if (rcStrict != VINF_SUCCESS)
3046 return rcStrict;
3047 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n", uNewCs, uNewEip, uNewFlags, uNewRsp));
3048
3049 /*
3050 * We're hopefully not returning to V8086 mode...
3051 */
3052 if ( (uNewFlags & X86_EFL_VM)
3053 && pIemCpu->uCpl == 0)
3054 {
3055 Assert(enmEffOpSize == IEMMODE_32BIT);
3056 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3057 }
3058
3059 /*
3060 * Protected mode.
3061 */
3062 /* Read the CS descriptor. */
3063 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3064 {
3065 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3066 return iemRaiseGeneralProtectionFault0(pIemCpu);
3067 }
3068
3069 IEMSELDESC DescCS;
3070 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3071 if (rcStrict != VINF_SUCCESS)
3072 {
3073 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3074 return rcStrict;
3075 }
3076
3077 /* Must be a code descriptor. */
3078 if (!DescCS.Legacy.Gen.u1DescType)
3079 {
3080 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3081 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3082 }
3083 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3084 {
3085 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3086 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3087 }
3088
3089#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3090 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3091 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3092 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3093 {
3094 if ((uNewCs & X86_SEL_RPL) == 1)
3095 {
3096 if ( pIemCpu->uCpl == 0
3097 && ( !EMIsRawRing1Enabled(pVM)
3098 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3099 {
3100 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3101 uNewCs &= X86_SEL_MASK_OFF_RPL;
3102 }
3103# ifdef LOG_ENABLED
3104 else if (pIemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3105 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3106# endif
3107 }
3108 else if ( (uNewCs & X86_SEL_RPL) == 2
3109 && EMIsRawRing1Enabled(pVM)
3110 && pIemCpu->uCpl <= 1)
3111 {
3112 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3113 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 1;
3114 }
3115 }
3116#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3117
3118
3119 /* Privilege checks. */
3120 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3121 {
3122 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3123 {
3124 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3125 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3126 }
3127 }
3128 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3129 {
3130 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3131 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3132 }
3133 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3134 {
3135 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
3136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3137 }
3138
3139 /* Present? */
3140 if (!DescCS.Legacy.Gen.u1Present)
3141 {
3142 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3143 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3144 }
3145
3146 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3147
3148 /*
3149 * Return to outer level?
3150 */
3151 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
3152 {
3153 uint16_t uNewSS;
3154 uint32_t uNewESP;
3155 if (enmEffOpSize == IEMMODE_32BIT)
3156 {
3157 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
3158 if (rcStrict != VINF_SUCCESS)
3159 return rcStrict;
3160/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3161 * 16 or 32 bits are loaded into SP depends on the D/B bit of the
3162 * popped SS selector, it turns out. */
3163 uNewESP = uFrame.pu32[0];
3164 uNewSS = (uint16_t)uFrame.pu32[1];
3165 }
3166 else
3167 {
3168 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
3169 if (rcStrict != VINF_SUCCESS)
3170 return rcStrict;
3171 uNewESP = uFrame.pu16[0];
3172 uNewSS = uFrame.pu16[1];
3173 }
3174 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3175 if (rcStrict != VINF_SUCCESS)
3176 return rcStrict;
3177 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3178
3179 /* Read the SS descriptor. */
3180 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3181 {
3182 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3183 return iemRaiseGeneralProtectionFault0(pIemCpu);
3184 }
3185
3186 IEMSELDESC DescSS;
3187 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3188 if (rcStrict != VINF_SUCCESS)
3189 {
3190 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3191 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3192 return rcStrict;
3193 }
3194
3195 /* Privilege checks. */
3196 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3197 {
3198 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3199 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3200 }
3201 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3202 {
3203 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3204 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3205 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3206 }
3207
3208 /* Must be a writeable data segment descriptor. */
3209 if (!DescSS.Legacy.Gen.u1DescType)
3210 {
3211 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3212 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3213 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3214 }
3215 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3216 {
3217 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3218 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3219 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3220 }
3221
3222 /* Present? */
3223 if (!DescSS.Legacy.Gen.u1Present)
3224 {
3225 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3226 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
3227 }
3228
3229 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3230
3231 /* Check EIP. */
3232 if (uNewEip > cbLimitCS)
3233 {
3234 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3235 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3236 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3237 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3238 }
3239
3240 /*
3241 * Commit the changes, marking CS and SS accessed first since
3242 * that may fail.
3243 */
3244 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3245 {
3246 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3247 if (rcStrict != VINF_SUCCESS)
3248 return rcStrict;
3249 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3250 }
3251 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3252 {
3253 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3254 if (rcStrict != VINF_SUCCESS)
3255 return rcStrict;
3256 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3257 }
3258
3259 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3260 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3261 if (enmEffOpSize != IEMMODE_16BIT)
3262 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3263 if (pIemCpu->uCpl == 0)
3264 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3265 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3266 fEFlagsMask |= X86_EFL_IF;
3267 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
3268 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3269 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3270 fEFlagsNew &= ~fEFlagsMask;
3271 fEFlagsNew |= uNewFlags & fEFlagsMask;
3272#ifdef DBGFTRACE_ENABLED
3273 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3274 pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3275 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3276#endif
3277
3278 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3279 pCtx->rip = uNewEip;
3280 pCtx->cs.Sel = uNewCs;
3281 pCtx->cs.ValidSel = uNewCs;
3282 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3283 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3284 pCtx->cs.u32Limit = cbLimitCS;
3285 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3286 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3287 if (!pCtx->ss.Attr.n.u1DefBig)
3288 pCtx->sp = (uint16_t)uNewESP;
3289 else
3290 pCtx->rsp = uNewESP;
3291 pCtx->ss.Sel = uNewSS;
3292 pCtx->ss.ValidSel = uNewSS;
3293 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3294 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3295 pCtx->ss.u32Limit = cbLimitSs;
3296 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3297
3298 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
3299 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3300 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3301 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3302 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3303
3304 /* Done! */
3305
3306 }
3307 /*
3308 * Return to the same level.
3309 */
3310 else
3311 {
3312 /* Check EIP. */
3313 if (uNewEip > cbLimitCS)
3314 {
3315 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3316 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3317 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3318 }
3319
3320 /*
3321 * Commit the changes, marking CS first since it may fail.
3322 */
3323 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3324 {
3325 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3326 if (rcStrict != VINF_SUCCESS)
3327 return rcStrict;
3328 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3329 }
3330
3331 X86EFLAGS NewEfl;
3332 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
3333 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3334 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3335 if (enmEffOpSize != IEMMODE_16BIT)
3336 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3337 if (pIemCpu->uCpl == 0)
3338 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3339 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
3340 fEFlagsMask |= X86_EFL_IF;
3341 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
3342 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3343 NewEfl.u &= ~fEFlagsMask;
3344 NewEfl.u |= fEFlagsMask & uNewFlags;
3345#ifdef DBGFTRACE_ENABLED
3346 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3347 pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,
3348 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3349#endif
3350
3351 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
3352 pCtx->rip = uNewEip;
3353 pCtx->cs.Sel = uNewCs;
3354 pCtx->cs.ValidSel = uNewCs;
3355 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3356 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3357 pCtx->cs.u32Limit = cbLimitCS;
3358 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3359 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3360 pCtx->rsp = uNewRsp;
3361 /* Done! */
3362 }
3363 return VINF_SUCCESS;
3364}
3365
3366
3367/**
3368 * Implements iret for long mode.
3369 *
3370 * @param enmEffOpSize The effective operand size.
3371 */
3372IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3373{
3374 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3375 NOREF(cbInstr);
3376
3377 /*
3378 * Nested task return is not supported in long mode.
3379 */
3380 if (pCtx->eflags.Bits.u1NT)
3381 {
3382 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3383 return iemRaiseGeneralProtectionFault0(pIemCpu);
3384 }
3385
3386 /*
3387 * Normal return.
3388 *
3389 * Do the stack bits, but don't commit RSP before everything checks
3390 * out right.
3391 */
3392 VBOXSTRICTRC rcStrict;
3393 RTCPTRUNION uFrame;
3394 uint64_t uNewRip;
3395 uint16_t uNewCs;
3396 uint16_t uNewSs;
3397 uint32_t uNewFlags;
3398 uint64_t uNewRsp;
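    /* The iret frame holds five slots - RIP, CS, RFLAGS, RSP and SS - each
       widened to the effective operand size. */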
3399 if (enmEffOpSize == IEMMODE_64BIT)
3400 {
3401 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
3402 if (rcStrict != VINF_SUCCESS)
3403 return rcStrict;
3404 uNewRip = uFrame.pu64[0];
3405 uNewCs = (uint16_t)uFrame.pu64[1];
3406 uNewFlags = (uint32_t)uFrame.pu64[2];
3407 uNewRsp = uFrame.pu64[3];
3408 uNewSs = (uint16_t)uFrame.pu64[4];
3409 }
3410 else if (enmEffOpSize == IEMMODE_32BIT)
3411 {
3412 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
3413 if (rcStrict != VINF_SUCCESS)
3414 return rcStrict;
3415 uNewRip = uFrame.pu32[0];
3416 uNewCs = (uint16_t)uFrame.pu32[1];
3417 uNewFlags = uFrame.pu32[2];
3418 uNewRsp = uFrame.pu32[3];
3419 uNewSs = (uint16_t)uFrame.pu32[4];
3420 }
3421 else
3422 {
3423 Assert(enmEffOpSize == IEMMODE_16BIT);
3424 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
3425 if (rcStrict != VINF_SUCCESS)
3426 return rcStrict;
3427 uNewRip = uFrame.pu16[0];
3428 uNewCs = uFrame.pu16[1];
3429 uNewFlags = uFrame.pu16[2];
3430 uNewRsp = uFrame.pu16[3];
3431 uNewSs = uFrame.pu16[4];
3432 }
3433 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3437
3438 /*
3439 * Check stuff.
3440 */
3441 /* Read the CS descriptor. */
3442 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3443 {
3444 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3445 return iemRaiseGeneralProtectionFault0(pIemCpu);
3446 }
3447
3448 IEMSELDESC DescCS;
3449 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3450 if (rcStrict != VINF_SUCCESS)
3451 {
3452 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3453 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3454 return rcStrict;
3455 }
3456
3457 /* Must be a code descriptor. */
3458 if ( !DescCS.Legacy.Gen.u1DescType
3459 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3460 {
3461        Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment (DescType=%u Type=%#x) -> #GP\n",
3462 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3463 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3464 }
3465
3466 /* Privilege checks. */
3467 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3468 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3469 {
3470 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3471 {
3472 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3473 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3474 }
3475 }
3476 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3477 {
3478 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3479 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3480 }
3481 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3482 {
3483 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pIemCpu->uCpl));
3484 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3485 }
3486
3487 /* Present? */
3488 if (!DescCS.Legacy.Gen.u1Present)
3489 {
3490 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3491 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3492 }
3493
3494 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3495
3496 /* Read the SS descriptor. */
3497 IEMSELDESC DescSS;
3498 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3499 {
3500 if ( !DescCS.Legacy.Gen.u1Long
3501 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3502 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3503 {
3504 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3505 return iemRaiseGeneralProtectionFault0(pIemCpu);
3506 }
3507 DescSS.Legacy.u = 0;
3508 }
3509 else
3510 {
3511 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3512 if (rcStrict != VINF_SUCCESS)
3513 {
3514 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3515 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3516 return rcStrict;
3517 }
3518 }
3519
3520 /* Privilege checks. */
3521 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3522 {
3523 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3524 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3525 }
3526
3527 uint32_t cbLimitSs;
3528 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3529 cbLimitSs = UINT32_MAX;
3530 else
3531 {
3532 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3533 {
3534 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3535 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3536 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3537 }
3538
3539 /* Must be a writeable data segment descriptor. */
3540 if (!DescSS.Legacy.Gen.u1DescType)
3541 {
3542 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3543 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3544 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3545 }
3546 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3547 {
3548 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3549 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3550 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3551 }
3552
3553 /* Present? */
3554 if (!DescSS.Legacy.Gen.u1Present)
3555 {
3556 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3557 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
3558 }
3559 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3560 }
3561
3562 /* Check EIP. */
3563 if (DescCS.Legacy.Gen.u1Long)
3564 {
3565 if (!IEM_IS_CANONICAL(uNewRip))
3566 {
3567 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3568 uNewCs, uNewRip, uNewSs, uNewRsp));
3569 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3570 }
3571 }
3572 else
3573 {
3574 if (uNewRip > cbLimitCS)
3575 {
3576 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3577 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3578 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3579 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3580 }
3581 }
3582
3583 /*
3584 * Commit the changes, marking CS and SS accessed first since
3585 * that may fail.
3586 */
3587 /** @todo where exactly are these actually marked accessed by a real CPU? */
3588 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3589 {
3590 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3591 if (rcStrict != VINF_SUCCESS)
3592 return rcStrict;
3593 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3594 }
3595 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3596 {
3597 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
3598 if (rcStrict != VINF_SUCCESS)
3599 return rcStrict;
3600 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3601 }
3602
3603 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3604 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3605 if (enmEffOpSize != IEMMODE_16BIT)
3606 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3607 if (pIemCpu->uCpl == 0)
3608 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3609 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3610 fEFlagsMask |= X86_EFL_IF;
3611 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3612 fEFlagsNew &= ~fEFlagsMask;
3613 fEFlagsNew |= uNewFlags & fEFlagsMask;
3614#ifdef DBGFTRACE_ENABLED
3615 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3616 pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3617#endif
3618
3619 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3620 pCtx->rip = uNewRip;
3621 pCtx->cs.Sel = uNewCs;
3622 pCtx->cs.ValidSel = uNewCs;
3623 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3624 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3625 pCtx->cs.u32Limit = cbLimitCS;
3626 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3627 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3628 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3629 pCtx->rsp = uNewRsp;
3630 else
3631 pCtx->sp = (uint16_t)uNewRsp;
3632 pCtx->ss.Sel = uNewSs;
3633 pCtx->ss.ValidSel = uNewSs;
3634 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3635 {
3636 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3637 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3638 pCtx->ss.u32Limit = UINT32_MAX;
3639 pCtx->ss.u64Base = 0;
3640 Log2(("iretq new SS: NULL\n"));
3641 }
3642 else
3643 {
3644 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3645 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3646 pCtx->ss.u32Limit = cbLimitSs;
3647 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3648 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3649 }
3650
3651 if (pIemCpu->uCpl != uNewCpl)
3652 {
3653 pIemCpu->uCpl = uNewCpl;
3654 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
3655 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
3656 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
3657 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
3658 }
3659
3660 return VINF_SUCCESS;
3661}
3662
3663
3664/**
3665 * Implements iret.
3666 *
3667 * @param enmEffOpSize The effective operand size.
3668 */
3669IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3670{
3671 /*
3672 * First, clear NMI blocking, if any, before causing any exceptions.
3673 */
3674 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3675 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3676
3677 /*
3678 * Call a mode specific worker.
3679 */
3680 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3681 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3682 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3683 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3684 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3685}
3686
3687
3688/**
3689 * Implements SYSCALL (AMD and Intel64).
3692 */
3693IEM_CIMPL_DEF_0(iemCImpl_syscall)
3694{
3695 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3696
3697 /*
3698 * Check preconditions.
3699 *
3700 * Note that CPUs described in the documentation may load a few odd values
3701 * into CS and SS that we do not allow here. This has yet to be checked on
3702 * real hardware.
3703 */
3704 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3705 {
3706 Log(("syscall: Not enabled in EFER -> #UD\n"));
3707 return iemRaiseUndefinedOpcode(pIemCpu);
3708 }
3709 if (!(pCtx->cr0 & X86_CR0_PE))
3710 {
3711 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3712 return iemRaiseGeneralProtectionFault0(pIemCpu);
3713 }
3714 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3715 {
3716 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3717 return iemRaiseUndefinedOpcode(pIemCpu);
3718 }
3719
3720 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3721 /** @todo what about LDT selectors? Shouldn't matter, really. */
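    /* SYSCALL takes the kernel CS selector from STAR[47:32] (RPL forced to 0)
       and assumes SS immediately follows it in the GDT, i.e. CS + 8.  For
       example, STAR[47:32] = 0x0008 yields CS = 0x0008 and SS = 0x0010. */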
3722 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3723 uint16_t uNewSs = uNewCs + 8;
3724 if (uNewCs == 0 || uNewSs == 0)
3725 {
3726 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3727 return iemRaiseGeneralProtectionFault0(pIemCpu);
3728 }
3729
3730 /* Long mode and legacy mode differs. */
3731 if (CPUMIsGuestInLongModeEx(pCtx))
3732 {
3733        uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3734
3735 /* This test isn't in the docs, but I'm not trusting the guys writing
3736 the MSRs to have validated the values as canonical like they should. */
3737 if (!IEM_IS_CANONICAL(uNewRip))
3738 {
3739            Log(("syscall: New RIP %016RX64 is not canonical -> #UD\n", uNewRip));
3740 return iemRaiseUndefinedOpcode(pIemCpu);
3741 }
3742
3743 /*
3744 * Commit it.
3745 */
3746        Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3747 pCtx->rcx = pCtx->rip + cbInstr;
3748 pCtx->rip = uNewRip;
3749
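        /* Save the outgoing RFLAGS (with RF cleared) in R11, then mask the new
           RFLAGS with the SFMASK MSR; the reserved bit 1 stays set. */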
3750 pCtx->rflags.u &= ~X86_EFL_RF;
3751 pCtx->r11 = pCtx->rflags.u;
3752 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3753 pCtx->rflags.u |= X86_EFL_1;
3754
3755 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3756 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3757 }
3758 else
3759 {
3760 /*
3761 * Commit it.
3762 */
3763 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3764             pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3765 pCtx->rcx = pCtx->eip + cbInstr;
3766 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3767 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3768
3769 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3770 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3771 }
3772 pCtx->cs.Sel = uNewCs;
3773 pCtx->cs.ValidSel = uNewCs;
3774 pCtx->cs.u64Base = 0;
3775 pCtx->cs.u32Limit = UINT32_MAX;
3776 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3777
3778 pCtx->ss.Sel = uNewSs;
3779 pCtx->ss.ValidSel = uNewSs;
3780 pCtx->ss.u64Base = 0;
3781 pCtx->ss.u32Limit = UINT32_MAX;
3782 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3783
3784 return VINF_SUCCESS;
3785}
3786
3787
3788/**
3789 * Implements SYSRET (AMD and Intel64).
3790 */
3791IEM_CIMPL_DEF_0(iemCImpl_sysret)
3792
3793{
3794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3795
3796 /*
3797 * Check preconditions.
3798 *
3799 * Note that CPUs described in the documentation may load a few odd values
3800 * into CS and SS that we do not allow here. This has yet to be checked on
3801 * real hardware.
3802 */
3803 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3804 {
3805 Log(("sysret: Not enabled in EFER -> #UD\n"));
3806 return iemRaiseUndefinedOpcode(pIemCpu);
3807 }
3808 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3809 {
3810 Log(("sysret: Only available in long mode on intel -> #UD\n"));
3811 return iemRaiseUndefinedOpcode(pIemCpu);
3812 }
3813 if (!(pCtx->cr0 & X86_CR0_PE))
3814 {
3815 Log(("sysret: Protected mode is required -> #GP(0)\n"));
3816 return iemRaiseGeneralProtectionFault0(pIemCpu);
3817 }
3818 if (pIemCpu->uCpl != 0)
3819 {
3820 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl));
3821 return iemRaiseGeneralProtectionFault0(pIemCpu);
3822 }
3823
3824 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
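    /* SYSRET derives the selectors from STAR[63:48]: SS is always STAR[63:48] + 8,
       while CS is STAR[63:48] for a 32-bit return and STAR[63:48] + 16 for a
       64-bit return (the 64-bit code segment is assumed to follow the 32-bit
       code and stack segments in the GDT). */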
3825 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3826 uint16_t uNewSs = uNewCs + 8;
3827 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3828 uNewCs += 16;
3829 if (uNewCs == 0 || uNewSs == 0)
3830 {
3831 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3832 return iemRaiseGeneralProtectionFault0(pIemCpu);
3833 }
3834
3835 /*
3836 * Commit it.
3837 */
3838 if (CPUMIsGuestInLongModeEx(pCtx))
3839 {
3840 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3841 {
3842 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
3843                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
3844            /* Note! We disregard the Intel manual regarding the RCX canonical
3845                    check; ask Intel/Xen why AMD doesn't do it. */
3846 pCtx->rip = pCtx->rcx;
3847 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3848 | (3 << X86DESCATTR_DPL_SHIFT);
3849 }
3850 else
3851 {
3852 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
3853                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
3854 pCtx->rip = pCtx->ecx;
3855 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3856 | (3 << X86DESCATTR_DPL_SHIFT);
3857 }
3858 /** @todo testcase: See what kind of flags we can make SYSRET restore and
3859 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
3860 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
3861 pCtx->rflags.u |= X86_EFL_1;
3862 }
3863 else
3864 {
3865        Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
3866 pCtx->rip = pCtx->rcx;
3867 pCtx->rflags.u |= X86_EFL_IF;
3868 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3869 | (3 << X86DESCATTR_DPL_SHIFT);
3870 }
3871 pCtx->cs.Sel = uNewCs | 3;
3872 pCtx->cs.ValidSel = uNewCs | 3;
3873 pCtx->cs.u64Base = 0;
3874 pCtx->cs.u32Limit = UINT32_MAX;
3875 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3876
3877 pCtx->ss.Sel = uNewSs | 3;
3878 pCtx->ss.ValidSel = uNewSs | 3;
3879 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3880    /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
3881 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
3882 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
3883 * on sysret. */
3884
3885 return VINF_SUCCESS;
3886}
3887
3888
3889/**
3890 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
3891 *
3892 * @param iSegReg The segment register number (valid).
3893 * @param uSel The new selector value.
3894 */
3895IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
3896{
3897 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3898 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
3899 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
3900
3901 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
3902
3903 /*
3904 * Real mode and V8086 mode are easy.
3905 */
3906 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3907 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3908 {
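        /* In real and V8086 mode the segment base is simply the selector shifted
           left by four bits (e.g. selector 0x1234 -> base 0x12340). */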
3909 *pSel = uSel;
3910 pHid->u64Base = (uint32_t)uSel << 4;
3911 pHid->ValidSel = uSel;
3912 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3913#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
3914 /** @todo Does the CPU actually load limits and attributes in the
3915 * real/V8086 mode segment load case? It doesn't for CS in far
3916 * jumps... Affects unreal mode. */
3917 pHid->u32Limit = 0xffff;
3918 pHid->Attr.u = 0;
3919 pHid->Attr.n.u1Present = 1;
3920 pHid->Attr.n.u1DescType = 1;
3921 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
3922 ? X86_SEL_TYPE_RW
3923 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
3924#endif
3925 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3926 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3927 return VINF_SUCCESS;
3928 }
3929
3930 /*
3931 * Protected mode.
3932 *
3933 * Check if it's a null segment selector value first, that's OK for DS, ES,
3934 * FS and GS. If not null, then we have to load and parse the descriptor.
3935 */
3936 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3937 {
3938 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
3939 if (iSegReg == X86_SREG_SS)
3940 {
3941 /* In 64-bit kernel mode, the stack can be 0 because of the way
3942               interrupts are dispatched. AMD seems to have a slightly more
3943               relaxed relationship to SS.RPL than Intel does. */
3944 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
3945 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
3946 || pIemCpu->uCpl > 2
3947 || ( uSel != pIemCpu->uCpl
3948 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
3949 {
3950 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
3951 return iemRaiseGeneralProtectionFault0(pIemCpu);
3952 }
3953 }
3954
3955 *pSel = uSel; /* Not RPL, remember :-) */
3956 iemHlpLoadNullDataSelectorProt(pIemCpu, pHid, uSel);
3957 if (iSegReg == X86_SREG_SS)
3958 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
3959
3960 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
3961 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3962
3963 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3964 return VINF_SUCCESS;
3965 }
3966
3967 /* Fetch the descriptor. */
3968 IEMSELDESC Desc;
3969 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
3970 if (rcStrict != VINF_SUCCESS)
3971 return rcStrict;
3972
3973 /* Check GPs first. */
3974 if (!Desc.Legacy.Gen.u1DescType)
3975 {
3976 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
3977 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3978 }
3979 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
3980 {
3981 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3982 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3983 {
3984 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
3985 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3986 }
3987 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
3988 {
3989 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
3990 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3991 }
3992 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3993 {
3994 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3995 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3996 }
3997 }
3998 else
3999 {
4000 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4001 {
4002 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4003 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4004 }
4005 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4006 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4007 {
4008#if 0 /* this is what intel says. */
4009 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4010 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4011 {
4012 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4013 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4014 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4015 }
4016#else /* this is what makes more sense. */
4017 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4018 {
4019 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4020 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4022 }
4023 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4024 {
4025 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4026 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4027 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4028 }
4029#endif
4030 }
4031 }
4032
4033 /* Is it there? */
4034 if (!Desc.Legacy.Gen.u1Present)
4035 {
4036 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4037 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4038 }
4039
4040 /* The base and limit. */
4041 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4042 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4043
4044 /*
4045 * Ok, everything checked out fine. Now set the accessed bit before
4046 * committing the result into the registers.
4047 */
4048 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4049 {
4050 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4051 if (rcStrict != VINF_SUCCESS)
4052 return rcStrict;
4053 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4054 }
4055
4056 /* commit */
4057 *pSel = uSel;
4058 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4059 pHid->u32Limit = cbLimit;
4060 pHid->u64Base = u64Base;
4061 pHid->ValidSel = uSel;
4062 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4063
4064 /** @todo check if the hidden bits are loaded correctly for 64-bit
4065 * mode. */
4066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
4067
4068 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
4069 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4070 return VINF_SUCCESS;
4071}
4072
4073
4074/**
4075 * Implements 'mov SReg, r/m'.
4076 *
4077 * @param iSegReg The segment register number (valid).
4078 * @param uSel The new selector value.
4079 */
4080IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4081{
4082 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4083 if (rcStrict == VINF_SUCCESS)
4084 {
4085 if (iSegReg == X86_SREG_SS)
4086 {
4087 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4088 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4089 }
4090 }
4091 return rcStrict;
4092}
4093
4094
4095/**
4096 * Implements 'pop SReg'.
4097 *
4098 * @param iSegReg The segment register number (valid).
4099 * @param enmEffOpSize The efficient operand size (valid).
4100 */
4101IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4102{
4103 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4104 VBOXSTRICTRC rcStrict;
4105
4106 /*
4107 * Read the selector off the stack and join paths with mov ss, reg.
4108 */
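    /* Pop via a local RSP copy so the stack pointer is only committed once the
       segment register load has succeeded. */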
4109 RTUINT64U TmpRsp;
4110 TmpRsp.u = pCtx->rsp;
4111 switch (enmEffOpSize)
4112 {
4113 case IEMMODE_16BIT:
4114 {
4115 uint16_t uSel;
4116 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4117 if (rcStrict == VINF_SUCCESS)
4118 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4119 break;
4120 }
4121
4122 case IEMMODE_32BIT:
4123 {
4124 uint32_t u32Value;
4125 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4126 if (rcStrict == VINF_SUCCESS)
4127 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4128 break;
4129 }
4130
4131 case IEMMODE_64BIT:
4132 {
4133 uint64_t u64Value;
4134 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4135 if (rcStrict == VINF_SUCCESS)
4136 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4137 break;
4138 }
4139 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4140 }
4141
4142 /*
4143 * Commit the stack on success.
4144 */
4145 if (rcStrict == VINF_SUCCESS)
4146 {
4147 pCtx->rsp = TmpRsp.u;
4148 if (iSegReg == X86_SREG_SS)
4149 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4150 }
4151 return rcStrict;
4152}
4153
4154
4155/**
4156 * Implements lgs, lfs, les, lds & lss.
4157 */
4158IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4159 uint16_t, uSel,
4160 uint64_t, offSeg,
4161 uint8_t, iSegReg,
4162 uint8_t, iGReg,
4163 IEMMODE, enmEffOpSize)
4164{
4165 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
4166 VBOXSTRICTRC rcStrict;
4167
4168 /*
4169 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4170 */
4171 /** @todo verify and test that mov, pop and lXs works the segment
4172 * register loading in the exact same way. */
4173 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4174 if (rcStrict == VINF_SUCCESS)
4175 {
4176 switch (enmEffOpSize)
4177 {
4178 case IEMMODE_16BIT:
4179 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4180 break;
4181 case IEMMODE_32BIT:
4182 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4183 break;
4184 case IEMMODE_64BIT:
4185 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4186 break;
4187 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4188 }
4189 }
4190
4191 return rcStrict;
4192}
4193
4194
4195/**
4196 * Helper for VERR, VERW, LAR, and LSL that fetches the descriptor for the given selector.
4197 *
4198 * @retval VINF_SUCCESS on success.
4199 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4200 * @retval iemMemFetchSysU64 return value.
4201 *
4202 * @param pIemCpu The IEM state of the calling EMT.
4203 * @param uSel The selector value.
4204 * @param fAllowSysDesc Whether system descriptors are OK or not.
4205 * @param pDesc Where to return the descriptor on success.
4206 */
4207static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4208{
4209 pDesc->Long.au64[0] = 0;
4210 pDesc->Long.au64[1] = 0;
4211
4212 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4213 return VINF_IEM_SELECTOR_NOT_OK;
4214
4215 /* Within the table limits? */
4216 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4217 RTGCPTR GCPtrBase;
4218 if (uSel & X86_SEL_LDT)
4219 {
4220 if ( !pCtx->ldtr.Attr.n.u1Present
4221 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4222 return VINF_IEM_SELECTOR_NOT_OK;
4223 GCPtrBase = pCtx->ldtr.u64Base;
4224 }
4225 else
4226 {
4227 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4228 return VINF_IEM_SELECTOR_NOT_OK;
4229 GCPtrBase = pCtx->gdtr.pGdt;
4230 }
4231
4232 /* Fetch the descriptor. */
4233 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4234 if (rcStrict != VINF_SUCCESS)
4235 return rcStrict;
4236 if (!pDesc->Legacy.Gen.u1DescType)
4237 {
4238 if (!fAllowSysDesc)
4239 return VINF_IEM_SELECTOR_NOT_OK;
4240 if (CPUMIsGuestInLongModeEx(pCtx))
4241 {
4242 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4243 if (rcStrict != VINF_SUCCESS)
4244 return rcStrict;
4245 }
4246
4247 }
4248
4249 return VINF_SUCCESS;
4250}
4251
4252
4253/**
4254 * Implements verr (fWrite = false) and verw (fWrite = true).
4255 */
4256IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4257{
4258 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4259
4260 /** @todo figure whether the accessed bit is set or not. */
4261
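    /* VERR/VERW never fault on an unusable selector; the result is reported
       solely via ZF. */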
4262 bool fAccessible = true;
4263 IEMSELDESC Desc;
4264 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4265 if (rcStrict == VINF_SUCCESS)
4266 {
4267 /* Check the descriptor, order doesn't matter much here. */
4268 if ( !Desc.Legacy.Gen.u1DescType
4269 || !Desc.Legacy.Gen.u1Present)
4270 fAccessible = false;
4271 else
4272 {
4273 if ( fWrite
4274 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4275 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4276 fAccessible = false;
4277
4278 /** @todo testcase for the conforming behavior. */
4279 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4280 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4281 {
4282 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4283 fAccessible = false;
4284 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4285 fAccessible = false;
4286 }
4287 }
4288
4289 }
4290 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4291 fAccessible = false;
4292 else
4293 return rcStrict;
4294
4295 /* commit */
4296 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
4297
4298 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4299 return VINF_SUCCESS;
4300}
4301
4302
4303/**
4304 * Implements LAR and LSL with 64-bit operand size.
4305 *
4306 * @returns VINF_SUCCESS.
4307 * @param pu64Dst Pointer to the destination register.
4308 * @param uSel The selector to load details for.
4309 * @param pEFlags Pointer to the eflags register.
4310 * @param fIsLar true = LAR, false = LSL.
4311 */
4312IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4313{
4314 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4315
4316 /** @todo figure whether the accessed bit is set or not. */
4317
4318 bool fDescOk = true;
4319 IEMSELDESC Desc;
4320 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4321 if (rcStrict == VINF_SUCCESS)
4322 {
4323 /*
4324 * Check the descriptor type.
4325 */
4326 if (!Desc.Legacy.Gen.u1DescType)
4327 {
4328 if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx)))
4329 {
4330 if (Desc.Long.Gen.u5Zeros)
4331 fDescOk = false;
4332 else
4333 switch (Desc.Long.Gen.u4Type)
4334 {
4335 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4336 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4337 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4338                        case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit do otherwise. */
4339 break;
4340 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4341 fDescOk = fIsLar;
4342 break;
4343 default:
4344 fDescOk = false;
4345 break;
4346 }
4347 }
4348 else
4349 {
4350 switch (Desc.Long.Gen.u4Type)
4351 {
4352 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4353 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4354 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4355 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4356 case X86_SEL_TYPE_SYS_LDT:
4357 break;
4358 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4359 case X86_SEL_TYPE_SYS_TASK_GATE:
4360 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4361 fDescOk = fIsLar;
4362 break;
4363 default:
4364 fDescOk = false;
4365 break;
4366 }
4367 }
4368 }
4369 if (fDescOk)
4370 {
4371 /*
4372 * Check the RPL/DPL/CPL interaction..
4373 */
4374 /** @todo testcase for the conforming behavior. */
4375 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4376 || !Desc.Legacy.Gen.u1DescType)
4377 {
4378 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4379 fDescOk = false;
4380 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4381 fDescOk = false;
4382 }
4383 }
4384
4385 if (fDescOk)
4386 {
4387 /*
4388 * All fine, start committing the result.
4389 */
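            /* LAR returns the second descriptor dword masked to the attribute
               bytes (bits 8 thru 23); LSL returns the limit with the granularity
               bit applied. */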
4390 if (fIsLar)
4391 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4392 else
4393 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4394 }
4395
4396 }
4397 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4398 fDescOk = false;
4399 else
4400 return rcStrict;
4401
4402 /* commit flags value and advance rip. */
4403 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;
4404 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4405
4406 return VINF_SUCCESS;
4407}
4408
4409
4410/**
4411 * Implements LAR and LSL with 16-bit operand size.
4412 *
4413 * @returns VINF_SUCCESS.
4414 * @param pu16Dst Pointer to the destination register.
4415 * @param uSel The selector to load details for.
4416 * @param pEFlags Pointer to the eflags register.
4417 * @param fIsLar true = LAR, false = LSL.
4418 */
4419IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4420{
4421 uint64_t u64TmpDst = *pu16Dst;
4422 IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar);
4423 *pu16Dst = (uint16_t)u64TmpDst;
4424 return VINF_SUCCESS;
4425}
4426
4427
4428/**
4429 * Implements lgdt.
4430 *
4431 * @param iEffSeg The segment of the new gdtr contents.
4432 * @param GCPtrEffSrc The address of the new gdtr contents.
4433 * @param enmEffOpSize The effective operand size.
4434 */
4435IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4436{
4437 if (pIemCpu->uCpl != 0)
4438 return iemRaiseGeneralProtectionFault0(pIemCpu);
4439 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4440
4441 /*
4442 * Fetch the limit and base address.
4443 */
4444 uint16_t cbLimit;
4445 RTGCPTR GCPtrBase;
4446 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4447 if (rcStrict == VINF_SUCCESS)
4448 {
4449 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4450 || X86_IS_CANONICAL(GCPtrBase))
4451 {
4452 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4453 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4454 else
4455 {
4456 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4457 pCtx->gdtr.cbGdt = cbLimit;
4458 pCtx->gdtr.pGdt = GCPtrBase;
4459 }
4460 if (rcStrict == VINF_SUCCESS)
4461 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4462 }
4463 else
4464 {
4465 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4466 return iemRaiseGeneralProtectionFault0(pIemCpu);
4467 }
4468 }
4469 return rcStrict;
4470}
4471
4472
4473/**
4474 * Implements sgdt.
4475 *
4476 * @param iEffSeg The segment where to store the gdtr content.
4477 * @param GCPtrEffDst The address where to store the gdtr content.
4478 */
4479IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4480{
4481 /*
4482 * Join paths with sidt.
4483 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4484 * you really must know.
4485 */
4486 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4487 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4488 if (rcStrict == VINF_SUCCESS)
4489 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4490 return rcStrict;
4491}
4492
4493
4494/**
4495 * Implements lidt.
4496 *
4497 * @param iEffSeg The segment of the new idtr contents.
4498 * @param GCPtrEffSrc The address of the new idtr contents.
4499 * @param enmEffOpSize The effective operand size.
4500 */
4501IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4502{
4503 if (pIemCpu->uCpl != 0)
4504 return iemRaiseGeneralProtectionFault0(pIemCpu);
4505 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4506
4507 /*
4508 * Fetch the limit and base address.
4509 */
4510 uint16_t cbLimit;
4511 RTGCPTR GCPtrBase;
4512 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4513 if (rcStrict == VINF_SUCCESS)
4514 {
4515 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4516 || X86_IS_CANONICAL(GCPtrBase))
4517 {
4518 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4519 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4520 else
4521 {
4522 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4523 pCtx->idtr.cbIdt = cbLimit;
4524 pCtx->idtr.pIdt = GCPtrBase;
4525 }
4526 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4527 }
4528 else
4529 {
4530 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4531 return iemRaiseGeneralProtectionFault0(pIemCpu);
4532 }
4533 }
4534 return rcStrict;
4535}
4536
4537
4538/**
4539 * Implements sidt.
4540 *
4541 * @param iEffSeg The segment where to store the idtr content.
4542 * @param GCPtrEffDst The address where to store the idtr content.
4543 */
4544IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4545{
4546 /*
4547 * Join paths with sgdt.
4548 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4549 * you really must know.
4550 */
4551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4552 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4553 if (rcStrict == VINF_SUCCESS)
4554 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4555 return rcStrict;
4556}
4557
4558
4559/**
4560 * Implements lldt.
4561 *
4562 * @param uNewLdt The new LDT selector value.
4563 */
4564IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4565{
4566 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4567
4568 /*
4569 * Check preconditions.
4570 */
4571 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4572 {
4573        Log(("lldt %04x - real or v8086 mode -> #UD\n", uNewLdt));
4574 return iemRaiseUndefinedOpcode(pIemCpu);
4575 }
4576 if (pIemCpu->uCpl != 0)
4577 {
4578 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
4579 return iemRaiseGeneralProtectionFault0(pIemCpu);
4580 }
4581 if (uNewLdt & X86_SEL_LDT)
4582 {
4583 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4584 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
4585 }
4586
4587 /*
4588 * Now, loading a NULL selector is easy.
4589 */
4590 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4591 {
4592 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4593 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4594 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
4595 else
4596 pCtx->ldtr.Sel = uNewLdt;
4597 pCtx->ldtr.ValidSel = uNewLdt;
4598 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4599 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4600 {
4601 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4602            pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4603 }
4604 else if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
4605 {
4606 /* AMD-V seems to leave the base and limit alone. */
4607 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4608 }
4609 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4610 {
4611 /* VT-x (Intel 3960x) seems to be doing the following. */
4612 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4613 pCtx->ldtr.u64Base = 0;
4614 pCtx->ldtr.u32Limit = UINT32_MAX;
4615 }
4616
4617 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4618 return VINF_SUCCESS;
4619 }
4620
4621 /*
4622 * Read the descriptor.
4623 */
4624 IEMSELDESC Desc;
4625 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4626 if (rcStrict != VINF_SUCCESS)
4627 return rcStrict;
4628
4629 /* Check GPs first. */
4630 if (Desc.Legacy.Gen.u1DescType)
4631 {
4632 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4633 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4634 }
4635 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4636 {
4637 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4638 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4639 }
4640 uint64_t u64Base;
4641 if (!IEM_IS_LONG_MODE(pIemCpu))
4642 u64Base = X86DESC_BASE(&Desc.Legacy);
4643 else
4644 {
4645 if (Desc.Long.Gen.u5Zeros)
4646 {
4647 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4648 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4649 }
4650
4651 u64Base = X86DESC64_BASE(&Desc.Long);
4652 if (!IEM_IS_CANONICAL(u64Base))
4653 {
4654 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4655 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4656 }
4657 }
4658
4659 /* NP */
4660 if (!Desc.Legacy.Gen.u1Present)
4661 {
4662 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4663 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
4664 }
4665
4666 /*
4667 * It checks out alright, update the registers.
4668 */
4669/** @todo check if the actual value is loaded or if the RPL is dropped */
4670 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4671 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
4672 else
4673 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4674 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4675 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4676 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4677 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4678 pCtx->ldtr.u64Base = u64Base;
4679
4680 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4681 return VINF_SUCCESS;
4682}
4683
4684
4685/**
4686 * Implements ltr.
4687 *
4688 * @param uNewTr The new task register (TSS) selector value.
4689 */
4690IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4691{
4692 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4693
4694 /*
4695 * Check preconditions.
4696 */
4697 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4698 {
4699        Log(("ltr %04x - real or v8086 mode -> #UD\n", uNewTr));
4700 return iemRaiseUndefinedOpcode(pIemCpu);
4701 }
4702 if (pIemCpu->uCpl != 0)
4703 {
4704 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
4705 return iemRaiseGeneralProtectionFault0(pIemCpu);
4706 }
4707 if (uNewTr & X86_SEL_LDT)
4708 {
4709 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4710 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
4711 }
4712 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4713 {
4714 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4715 return iemRaiseGeneralProtectionFault0(pIemCpu);
4716 }
4717
4718 /*
4719 * Read the descriptor.
4720 */
4721 IEMSELDESC Desc;
4722 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4723 if (rcStrict != VINF_SUCCESS)
4724 return rcStrict;
4725
4726 /* Check GPs first. */
4727 if (Desc.Legacy.Gen.u1DescType)
4728 {
4729 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4730 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4731 }
4732 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4733 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4734 || IEM_IS_LONG_MODE(pIemCpu)) )
4735 {
4736 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4737 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4738 }
4739 uint64_t u64Base;
4740 if (!IEM_IS_LONG_MODE(pIemCpu))
4741 u64Base = X86DESC_BASE(&Desc.Legacy);
4742 else
4743 {
4744 if (Desc.Long.Gen.u5Zeros)
4745 {
4746 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4747 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4748 }
4749
4750 u64Base = X86DESC64_BASE(&Desc.Long);
4751 if (!IEM_IS_CANONICAL(u64Base))
4752 {
4753 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
4754 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4755 }
4756 }
4757
4758 /* NP */
4759 if (!Desc.Legacy.Gen.u1Present)
4760 {
4761 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
4762 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
4763 }
4764
4765 /*
4766 * Set it busy.
4767 * Note! Intel says this should lock down the whole descriptor, but we'll
4768     *       restrict ourselves to 32-bit for now due to lack of inline
4769 * assembly and such.
4770 */
4771 void *pvDesc;
4772 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
4773 if (rcStrict != VINF_SUCCESS)
4774 return rcStrict;
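    /* The busy bit is bit 1 of the type field, i.e. bit 41 of the descriptor.
       ASMAtomicBitSet wants a 32-bit aligned address, so the pointer and bit
       offset are adjusted according to the mapping's alignment. */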
4775 switch ((uintptr_t)pvDesc & 3)
4776 {
4777 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
4778 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
4779 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
4780 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
4781 }
4782 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
4783 if (rcStrict != VINF_SUCCESS)
4784 return rcStrict;
4785 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4786
4787 /*
4788 * It checks out alright, update the registers.
4789 */
4790/** @todo check if the actual value is loaded or if the RPL is dropped */
4791 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4792 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
4793 else
4794 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
4795 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
4796 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4797 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4798 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4799 pCtx->tr.u64Base = u64Base;
4800
4801 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4802 return VINF_SUCCESS;
4803}
4804
4805
4806/**
4807 * Implements mov GReg,CRx.
4808 *
4809 * @param iGReg The general register to store the CRx value in.
4810 * @param iCrReg The CRx register to read (valid).
4811 */
4812IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4813{
4814 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4815 if (pIemCpu->uCpl != 0)
4816 return iemRaiseGeneralProtectionFault0(pIemCpu);
4817 Assert(!pCtx->eflags.Bits.u1VM);
4818
4819 /* read it */
4820 uint64_t crX;
4821 switch (iCrReg)
4822 {
4823 case 0:
4824 crX = pCtx->cr0;
4825 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
4826 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
4827 break;
4828 case 2: crX = pCtx->cr2; break;
4829 case 3: crX = pCtx->cr3; break;
4830 case 4: crX = pCtx->cr4; break;
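        /* CR8 is the task-priority register: the upper four bits of the APIC
           TPR, hence the shift by four below. */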
4831 case 8:
4832 {
4833 uint8_t uTpr;
4834 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
4835 if (RT_SUCCESS(rc))
4836 crX = uTpr >> 4;
4837 else
4838 crX = 0;
4839 break;
4840 }
4841 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4842 }
4843
4844 /* store it */
4845 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4846 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4847 else
4848 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4849
4850 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4851 return VINF_SUCCESS;
4852}
4853
4854
4855/**
4856 * Used to implemented 'mov CRx,GReg' and 'lmsw r/m16'.
4857 *
4858 * @param iCrReg The CRx register to write (valid).
4859 * @param uNewCrX The new value.
4860 */
4861IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
4862{
4863 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4864 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4865 VBOXSTRICTRC rcStrict;
4866 int rc;
4867
4868 /*
4869 * Try store it.
4870 * Unfortunately, CPUM only does a tiny bit of the work.
4871 */
4872 switch (iCrReg)
4873 {
4874 case 0:
4875 {
4876 /*
4877 * Perform checks.
4878 */
4879 uint64_t const uOldCrX = pCtx->cr0;
4880 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4881 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4882 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
4883
4884 /* ET is hardcoded on 486 and later. */
4885 if (IEM_GET_TARGET_CPU(pIemCpu) > IEMTARGETCPU_486)
4886 uNewCrX |= X86_CR0_ET;
4887 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
4888 else if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_486)
4889 {
4890 uNewCrX &= fValid;
4891 uNewCrX |= X86_CR0_ET;
4892 }
4893 else
4894 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
4895
4896 /* Check for reserved bits. */
4897 if (uNewCrX & ~(uint64_t)fValid)
4898 {
4899 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
4900 return iemRaiseGeneralProtectionFault0(pIemCpu);
4901 }
4902
4903 /* Check for invalid combinations. */
4904 if ( (uNewCrX & X86_CR0_PG)
4905 && !(uNewCrX & X86_CR0_PE) )
4906 {
4907 Log(("Trying to set CR0.PG without CR0.PE\n"));
4908 return iemRaiseGeneralProtectionFault0(pIemCpu);
4909 }
4910
4911 if ( !(uNewCrX & X86_CR0_CD)
4912 && (uNewCrX & X86_CR0_NW) )
4913 {
4914 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4915 return iemRaiseGeneralProtectionFault0(pIemCpu);
4916 }
4917
4918 /* Long mode consistency checks. */
4919 if ( (uNewCrX & X86_CR0_PG)
4920 && !(uOldCrX & X86_CR0_PG)
4921 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4922 {
4923 if (!(pCtx->cr4 & X86_CR4_PAE))
4924 {
4925                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
4926 return iemRaiseGeneralProtectionFault0(pIemCpu);
4927 }
4928 if (pCtx->cs.Attr.n.u1Long)
4929 {
4930                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
4931 return iemRaiseGeneralProtectionFault0(pIemCpu);
4932 }
4933 }
4934
4935 /** @todo check reserved PDPTR bits as AMD states. */
4936
4937 /*
4938 * Change CR0.
4939 */
4940 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4941 CPUMSetGuestCR0(pVCpu, uNewCrX);
4942 else
4943 pCtx->cr0 = uNewCrX;
4944 Assert(pCtx->cr0 == uNewCrX);
4945
4946 /*
4947 * Change EFER.LMA if entering or leaving long mode.
4948 */
4949 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
4950 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4951 {
4952 uint64_t NewEFER = pCtx->msrEFER;
4953 if (uNewCrX & X86_CR0_PG)
4954 NewEFER |= MSR_K6_EFER_LMA;
4955 else
4956 NewEFER &= ~MSR_K6_EFER_LMA;
4957
4958 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4959 CPUMSetGuestEFER(pVCpu, NewEFER);
4960 else
4961 pCtx->msrEFER = NewEFER;
4962 Assert(pCtx->msrEFER == NewEFER);
4963 }
4964
4965 /*
4966 * Inform PGM.
4967 */
4968 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4969 {
4970 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
4971 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
4972 {
4973 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4974 AssertRCReturn(rc, rc);
4975 /* ignore informational status codes */
4976 }
4977 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4978 }
4979 else
4980 rcStrict = VINF_SUCCESS;
4981
4982#ifdef IN_RC
4983 /* Return to ring-3 for rescheduling if WP or AM changes. */
4984 if ( rcStrict == VINF_SUCCESS
4985 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
4986 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
4987 rcStrict = VINF_EM_RESCHEDULE;
4988#endif
4989 break;
4990 }
4991
4992 /*
4993 * CR2 can be changed without any restrictions.
4994 */
4995 case 2:
4996 pCtx->cr2 = uNewCrX;
4997 rcStrict = VINF_SUCCESS;
4998 break;
4999
5000 /*
5001 * CR3 is relatively simple, although AMD and Intel have different
5002         * accounts of how setting reserved bits is handled. We take Intel's
5003         * word for the lower bits and AMD's for the high bits (63:52). The
5004 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5005 * on this.
5006 */
5007 /** @todo Testcase: Setting reserved bits in CR3, especially before
5008 * enabling paging. */
5009 case 3:
5010 {
5011 /* check / mask the value. */
5012 if (uNewCrX & UINT64_C(0xfff0000000000000))
5013 {
5014 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5015 return iemRaiseGeneralProtectionFault0(pIemCpu);
5016 }
5017
5018 uint64_t fValid;
5019 if ( (pCtx->cr4 & X86_CR4_PAE)
5020 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5021 fValid = UINT64_C(0x000fffffffffffff);
5022 else
5023 fValid = UINT64_C(0xffffffff);
5024 if (uNewCrX & ~fValid)
5025 {
5026 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5027 uNewCrX, uNewCrX & ~fValid));
5028 uNewCrX &= fValid;
5029 }
5030
5031 /** @todo If we're in PAE mode we should check the PDPTRs for
5032 * invalid bits. */
5033
5034 /* Make the change. */
5035 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5036 {
5037 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5038 AssertRCSuccessReturn(rc, rc);
5039 }
5040 else
5041 pCtx->cr3 = uNewCrX;
5042
5043 /* Inform PGM. */
5044 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5045 {
5046 if (pCtx->cr0 & X86_CR0_PG)
5047 {
5048 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5049 AssertRCReturn(rc, rc);
5050 /* ignore informational status codes */
5051 }
5052 }
5053 rcStrict = VINF_SUCCESS;
5054 break;
5055 }
5056
5057 /*
5058 * CR4 is a bit more tedious as there are bits which cannot be cleared
5059 * under some circumstances and such.
5060 */
5061 case 4:
5062 {
5063 uint64_t const uOldCrX = pCtx->cr4;
5064
5065 /** @todo Shouldn't this look at the guest CPUID bits to determine
5066 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5067 * should #GP(0). */
5068 /* reserved bits */
5069 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5070 | X86_CR4_TSD | X86_CR4_DE
5071 | X86_CR4_PSE | X86_CR4_PAE
5072 | X86_CR4_MCE | X86_CR4_PGE
5073 | X86_CR4_PCE | X86_CR4_OSFXSR
5074 | X86_CR4_OSXMMEEXCPT;
5075 //if (xxx)
5076 // fValid |= X86_CR4_VMXE;
5077 if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
5078 fValid |= X86_CR4_OSXSAVE;
5079 if (uNewCrX & ~(uint64_t)fValid)
5080 {
5081 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5082 return iemRaiseGeneralProtectionFault0(pIemCpu);
5083 }
5084
5085 /* long mode checks. */
5086 if ( (uOldCrX & X86_CR4_PAE)
5087 && !(uNewCrX & X86_CR4_PAE)
5088 && CPUMIsGuestInLongModeEx(pCtx) )
5089 {
 5090 Log(("Trying to clear CR4.PAE while long mode is active\n"));
5091 return iemRaiseGeneralProtectionFault0(pIemCpu);
5092 }
5093
5094
5095 /*
5096 * Change it.
5097 */
5098 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5099 {
5100 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5101 AssertRCSuccessReturn(rc, rc);
5102 }
5103 else
5104 pCtx->cr4 = uNewCrX;
5105 Assert(pCtx->cr4 == uNewCrX);
5106
5107 /*
5108 * Notify SELM and PGM.
5109 */
5110 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5111 {
5112 /* SELM - VME may change things wrt to the TSS shadowing. */
5113 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5114 {
5115 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5116 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5117#ifdef VBOX_WITH_RAW_MODE
5118 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
5119 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5120#endif
5121 }
5122
5123 /* PGM - flushing and mode. */
5124 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5125 {
5126 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5127 AssertRCReturn(rc, rc);
5128 /* ignore informational status codes */
5129 }
5130 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5131 }
5132 else
5133 rcStrict = VINF_SUCCESS;
5134 break;
5135 }
5136
5137 /*
5138 * CR8 maps to the APIC TPR.
5139 */
5140 case 8:
5141 if (uNewCrX & ~(uint64_t)0xf)
5142 {
5143 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5144 return iemRaiseGeneralProtectionFault0(pIemCpu);
5145 }
5146
5147 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5148 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
5149 rcStrict = VINF_SUCCESS;
5150 break;
5151
5152 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5153 }
5154
5155 /*
5156 * Advance the RIP on success.
5157 */
5158 if (RT_SUCCESS(rcStrict))
5159 {
5160 if (rcStrict != VINF_SUCCESS)
5161 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5162 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5163 }
5164
5165 return rcStrict;
5166}
5167
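/*
 * Editor's note -- illustrative sketch, not part of the original source file:
 * the EFER.LMA handling above implements the architectural rule that, on a
 * CR0 write, LMA ends up as "LME && CR0.PG".  A stand-alone version of that
 * rule might look like the helper below; the function name is invented for
 * illustration only.
 */
#if 0 /* sketch */
static uint64_t iemSketchEferLmaOnCr0Write(uint64_t fEfer, uint64_t fCr0)
{
    /* LMA mirrors LME, but only while paging is enabled. */
    if ((fEfer & MSR_K6_EFER_LME) && (fCr0 & X86_CR0_PG))
        fEfer |= MSR_K6_EFER_LMA;
    else
        fEfer &= ~MSR_K6_EFER_LMA;
    return fEfer;
}
#endif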
5168
5169/**
5170 * Implements mov CRx,GReg.
5171 *
5172 * @param iCrReg The CRx register to write (valid).
 5173 * @param iGReg The general register to load the CRx value from.
5174 */
5175IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5176{
5177 if (pIemCpu->uCpl != 0)
5178 return iemRaiseGeneralProtectionFault0(pIemCpu);
5179 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5180
5181 /*
5182 * Read the new value from the source register and call common worker.
5183 */
5184 uint64_t uNewCrX;
5185 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5186 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
5187 else
5188 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
5189 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
5190}
5191
5192
5193/**
5194 * Implements 'LMSW r/m16'
5195 *
5196 * @param u16NewMsw The new value.
5197 */
5198IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5199{
5200 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5201
5202 if (pIemCpu->uCpl != 0)
5203 return iemRaiseGeneralProtectionFault0(pIemCpu);
5204 Assert(!pCtx->eflags.Bits.u1VM);
5205
5206 /*
5207 * Compose the new CR0 value and call common worker.
5208 */
5209 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5210 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5211 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5212}
5213
5214
5215/**
5216 * Implements 'CLTS'.
5217 */
5218IEM_CIMPL_DEF_0(iemCImpl_clts)
5219{
5220 if (pIemCpu->uCpl != 0)
5221 return iemRaiseGeneralProtectionFault0(pIemCpu);
5222
5223 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5224 uint64_t uNewCr0 = pCtx->cr0;
5225 uNewCr0 &= ~X86_CR0_TS;
5226 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5227}
5228
5229
5230/**
5231 * Implements mov GReg,DRx.
5232 *
5233 * @param iGReg The general register to store the DRx value in.
5234 * @param iDrReg The DRx register to read (0-7).
5235 */
5236IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5237{
5238 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5239
5240 /*
5241 * Check preconditions.
5242 */
5243
5244 /* Raise GPs. */
5245 if (pIemCpu->uCpl != 0)
5246 return iemRaiseGeneralProtectionFault0(pIemCpu);
5247 Assert(!pCtx->eflags.Bits.u1VM);
5248
5249 if ( (iDrReg == 4 || iDrReg == 5)
5250 && (pCtx->cr4 & X86_CR4_DE) )
5251 {
5252 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5253 return iemRaiseGeneralProtectionFault0(pIemCpu);
5254 }
5255
5256 /* Raise #DB if general access detect is enabled. */
5257 if (pCtx->dr[7] & X86_DR7_GD)
5258 {
5259 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5260 return iemRaiseDebugException(pIemCpu);
5261 }
5262
5263 /*
5264 * Read the debug register and store it in the specified general register.
5265 */
5266 uint64_t drX;
5267 switch (iDrReg)
5268 {
5269 case 0: drX = pCtx->dr[0]; break;
5270 case 1: drX = pCtx->dr[1]; break;
5271 case 2: drX = pCtx->dr[2]; break;
5272 case 3: drX = pCtx->dr[3]; break;
5273 case 6:
5274 case 4:
5275 drX = pCtx->dr[6];
5276 drX |= X86_DR6_RA1_MASK;
5277 drX &= ~X86_DR6_RAZ_MASK;
5278 break;
5279 case 7:
5280 case 5:
5281 drX = pCtx->dr[7];
 5282 drX |= X86_DR7_RA1_MASK;
5283 drX &= ~X86_DR7_RAZ_MASK;
5284 break;
5285 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5286 }
5287
5288 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5289 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
5290 else
5291 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
5292
5293 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5294 return VINF_SUCCESS;
5295}
5296
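/*
 * Editor's note -- illustrative sketch, not part of the original source file:
 * both MOV reg,DRx above and MOV DRx,reg below treat DR4/DR5 as aliases for
 * DR6/DR7 when CR4.DE is clear, and raise #GP(0) for them when CR4.DE is set.
 * The helper below just restates that aliasing rule; its name and the use of
 * VERR_ACCESS_DENIED as the "caller should #GP" indicator are invented for
 * illustration.
 */
#if 0 /* sketch */
static int iemSketchResolveDrAlias(uint8_t iDrReg, uint64_t fCr4, uint8_t *piEffDrReg)
{
    if (iDrReg == 4 || iDrReg == 5)
    {
        if (fCr4 & X86_CR4_DE)
            return VERR_ACCESS_DENIED;      /* caller raises #GP(0) */
        iDrReg += 2;                        /* DR4 -> DR6, DR5 -> DR7 */
    }
    *piEffDrReg = iDrReg;
    return VINF_SUCCESS;
}
#endif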
5297
5298/**
5299 * Implements mov DRx,GReg.
5300 *
5301 * @param iDrReg The DRx register to write (valid).
5302 * @param iGReg The general register to load the DRx value from.
5303 */
5304IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5305{
5306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5307
5308 /*
5309 * Check preconditions.
5310 */
5311 if (pIemCpu->uCpl != 0)
5312 return iemRaiseGeneralProtectionFault0(pIemCpu);
5313 Assert(!pCtx->eflags.Bits.u1VM);
5314
5315 if (iDrReg == 4 || iDrReg == 5)
5316 {
5317 if (pCtx->cr4 & X86_CR4_DE)
5318 {
5319 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5320 return iemRaiseGeneralProtectionFault0(pIemCpu);
5321 }
5322 iDrReg += 2;
5323 }
5324
5325 /* Raise #DB if general access detect is enabled. */
 5326 /** @todo Is the \#DB for DR7.GD raised before the \#GP for reserved high
 5327 * bits in DR7/DR6? */
5328 if (pCtx->dr[7] & X86_DR7_GD)
5329 {
5330 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5331 return iemRaiseDebugException(pIemCpu);
5332 }
5333
5334 /*
5335 * Read the new value from the source register.
5336 */
5337 uint64_t uNewDrX;
5338 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5339 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
5340 else
5341 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
5342
5343 /*
5344 * Adjust it.
5345 */
5346 switch (iDrReg)
5347 {
5348 case 0:
5349 case 1:
5350 case 2:
5351 case 3:
5352 /* nothing to adjust */
5353 break;
5354
5355 case 6:
5356 if (uNewDrX & X86_DR6_MBZ_MASK)
5357 {
5358 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5359 return iemRaiseGeneralProtectionFault0(pIemCpu);
5360 }
5361 uNewDrX |= X86_DR6_RA1_MASK;
5362 uNewDrX &= ~X86_DR6_RAZ_MASK;
5363 break;
5364
5365 case 7:
5366 if (uNewDrX & X86_DR7_MBZ_MASK)
5367 {
5368 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5369 return iemRaiseGeneralProtectionFault0(pIemCpu);
5370 }
5371 uNewDrX |= X86_DR7_RA1_MASK;
5372 uNewDrX &= ~X86_DR7_RAZ_MASK;
5373 break;
5374
5375 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5376 }
5377
5378 /*
5379 * Do the actual setting.
5380 */
5381 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5382 {
5383 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
5384 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5385 }
5386 else
5387 pCtx->dr[iDrReg] = uNewDrX;
5388
5389 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5390 return VINF_SUCCESS;
5391}
5392
5393
5394/**
5395 * Implements 'INVLPG m'.
5396 *
5397 * @param GCPtrPage The effective address of the page to invalidate.
5398 * @remarks Updates the RIP.
5399 */
5400IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5401{
5402 /* ring-0 only. */
5403 if (pIemCpu->uCpl != 0)
5404 return iemRaiseGeneralProtectionFault0(pIemCpu);
5405 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5406
5407 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
5408 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5409
5410 if (rc == VINF_SUCCESS)
5411 return VINF_SUCCESS;
5412 if (rc == VINF_PGM_SYNC_CR3)
5413 return iemSetPassUpStatus(pIemCpu, rc);
5414
5415 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5416 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5417 return rc;
5418}
5419
5420
5421/**
5422 * Implements RDTSC.
5423 */
5424IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5425{
5426 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5427
5428 /*
5429 * Check preconditions.
5430 */
5431 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
5432 return iemRaiseUndefinedOpcode(pIemCpu);
5433
5434 if ( (pCtx->cr4 & X86_CR4_TSD)
5435 && pIemCpu->uCpl != 0)
5436 {
5437 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
5438 return iemRaiseGeneralProtectionFault0(pIemCpu);
5439 }
5440
5441 /*
5442 * Do the job.
5443 */
5444 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
5445 pCtx->rax = (uint32_t)uTicks;
5446 pCtx->rdx = uTicks >> 32;
5447#ifdef IEM_VERIFICATION_MODE_FULL
5448 pIemCpu->fIgnoreRaxRdx = true;
5449#endif
5450
5451 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5452 return VINF_SUCCESS;
5453}
5454
5455
5456/**
5457 * Implements RDMSR.
5458 */
5459IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5460{
5461 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5462
5463 /*
5464 * Check preconditions.
5465 */
5466 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5467 return iemRaiseUndefinedOpcode(pIemCpu);
5468 if (pIemCpu->uCpl != 0)
5469 return iemRaiseGeneralProtectionFault0(pIemCpu);
5470
5471 /*
5472 * Do the job.
5473 */
5474 RTUINT64U uValue;
5475 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
5476 if (rcStrict == VINF_SUCCESS)
5477 {
5478 pCtx->rax = uValue.s.Lo;
5479 pCtx->rdx = uValue.s.Hi;
5480
5481 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5482 return VINF_SUCCESS;
5483 }
5484
5485#ifndef IN_RING3
5486 /* Deferred to ring-3. */
5487 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5488 {
5489 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5490 return rcStrict;
5491 }
5492#else /* IN_RING3 */
 5493 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5494 static uint32_t s_cTimes = 0;
5495 if (s_cTimes++ < 10)
5496 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5497 else
5498#endif
5499 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5500 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5501 return iemRaiseGeneralProtectionFault0(pIemCpu);
5502}
5503
5504
5505/**
5506 * Implements WRMSR.
5507 */
5508IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5509{
5510 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5511
5512 /*
5513 * Check preconditions.
5514 */
5515 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5516 return iemRaiseUndefinedOpcode(pIemCpu);
5517 if (pIemCpu->uCpl != 0)
5518 return iemRaiseGeneralProtectionFault0(pIemCpu);
5519
5520 /*
5521 * Do the job.
5522 */
5523 RTUINT64U uValue;
5524 uValue.s.Lo = pCtx->eax;
5525 uValue.s.Hi = pCtx->edx;
5526
5527 VBOXSTRICTRC rcStrict;
5528 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5529 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5530 else
5531 {
5532#ifdef IN_RING3
5533 CPUMCTX CtxTmp = *pCtx;
5534 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5535 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5536 *pCtx = *pCtx2;
5537 *pCtx2 = CtxTmp;
5538#else
5539 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5540#endif
5541 }
5542 if (rcStrict == VINF_SUCCESS)
5543 {
5544 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5545 return VINF_SUCCESS;
5546 }
5547
5548#ifndef IN_RING3
5549 /* Deferred to ring-3. */
5550 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
5551 {
 5552 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
5553 return rcStrict;
5554 }
5555#else /* IN_RING3 */
 5556 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5557 static uint32_t s_cTimes = 0;
5558 if (s_cTimes++ < 10)
5559 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5560 else
5561#endif
5562 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5563 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5564 return iemRaiseGeneralProtectionFault0(pIemCpu);
5565}
5566
5567
5568/**
5569 * Implements 'IN eAX, port'.
5570 *
5571 * @param u16Port The source port.
5572 * @param cbReg The register size.
5573 */
5574IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5575{
5576 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5577
5578 /*
5579 * CPL check
5580 */
5581 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5582 if (rcStrict != VINF_SUCCESS)
5583 return rcStrict;
5584
5585 /*
5586 * Perform the I/O.
5587 */
5588 uint32_t u32Value;
5589 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5590 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
5591 else
5592 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
5593 if (IOM_SUCCESS(rcStrict))
5594 {
5595 switch (cbReg)
5596 {
5597 case 1: pCtx->al = (uint8_t)u32Value; break;
5598 case 2: pCtx->ax = (uint16_t)u32Value; break;
5599 case 4: pCtx->rax = u32Value; break;
5600 default: AssertFailedReturn(VERR_IEM_IPE_3);
5601 }
5602 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5603 pIemCpu->cPotentialExits++;
5604 if (rcStrict != VINF_SUCCESS)
5605 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5606 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5607
5608 /*
5609 * Check for I/O breakpoints.
5610 */
5611 uint32_t const uDr7 = pCtx->dr[7];
5612 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5613 && X86_DR7_ANY_RW_IO(uDr7)
5614 && (pCtx->cr4 & X86_CR4_DE))
5615 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5616 {
5617 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5618 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5619 rcStrict = iemRaiseDebugException(pIemCpu);
5620 }
5621 }
5622
5623 return rcStrict;
5624}
5625
5626
5627/**
5628 * Implements 'IN eAX, DX'.
5629 *
5630 * @param cbReg The register size.
5631 */
5632IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5633{
5634 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5635}
5636
5637
5638/**
5639 * Implements 'OUT port, eAX'.
5640 *
5641 * @param u16Port The destination port.
5642 * @param cbReg The register size.
5643 */
5644IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5645{
5646 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5647
5648 /*
5649 * CPL check
5650 */
5651 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5652 if (rcStrict != VINF_SUCCESS)
5653 return rcStrict;
5654
5655 /*
5656 * Perform the I/O.
5657 */
5658 uint32_t u32Value;
5659 switch (cbReg)
5660 {
5661 case 1: u32Value = pCtx->al; break;
5662 case 2: u32Value = pCtx->ax; break;
5663 case 4: u32Value = pCtx->eax; break;
5664 default: AssertFailedReturn(VERR_IEM_IPE_4);
5665 }
5666 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5667 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
5668 else
5669 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
5670 if (IOM_SUCCESS(rcStrict))
5671 {
5672 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5673 pIemCpu->cPotentialExits++;
5674 if (rcStrict != VINF_SUCCESS)
5675 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5676 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5677
5678 /*
5679 * Check for I/O breakpoints.
5680 */
5681 uint32_t const uDr7 = pCtx->dr[7];
5682 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5683 && X86_DR7_ANY_RW_IO(uDr7)
5684 && (pCtx->cr4 & X86_CR4_DE))
5685 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5686 {
5687 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5688 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5689 rcStrict = iemRaiseDebugException(pIemCpu);
5690 }
5691 }
5692 return rcStrict;
5693}
5694
5695
5696/**
5697 * Implements 'OUT DX, eAX'.
5698 *
5699 * @param cbReg The register size.
5700 */
5701IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5702{
5703 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5704}
5705
5706
5707/**
5708 * Implements 'CLI'.
5709 */
5710IEM_CIMPL_DEF_0(iemCImpl_cli)
5711{
5712 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5713 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5714 uint32_t const fEflOld = fEfl;
5715 if (pCtx->cr0 & X86_CR0_PE)
5716 {
5717 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5718 if (!(fEfl & X86_EFL_VM))
5719 {
5720 if (pIemCpu->uCpl <= uIopl)
5721 fEfl &= ~X86_EFL_IF;
5722 else if ( pIemCpu->uCpl == 3
5723 && (pCtx->cr4 & X86_CR4_PVI) )
5724 fEfl &= ~X86_EFL_VIF;
5725 else
5726 return iemRaiseGeneralProtectionFault0(pIemCpu);
5727 }
5728 /* V8086 */
5729 else if (uIopl == 3)
5730 fEfl &= ~X86_EFL_IF;
5731 else if ( uIopl < 3
5732 && (pCtx->cr4 & X86_CR4_VME) )
5733 fEfl &= ~X86_EFL_VIF;
5734 else
5735 return iemRaiseGeneralProtectionFault0(pIemCpu);
5736 }
5737 /* real mode */
5738 else
5739 fEfl &= ~X86_EFL_IF;
5740
5741 /* Commit. */
5742 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5743 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5744 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
5745 return VINF_SUCCESS;
5746}
5747
5748
5749/**
5750 * Implements 'STI'.
5751 */
5752IEM_CIMPL_DEF_0(iemCImpl_sti)
5753{
5754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5755 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5756 uint32_t const fEflOld = fEfl;
5757
5758 if (pCtx->cr0 & X86_CR0_PE)
5759 {
5760 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5761 if (!(fEfl & X86_EFL_VM))
5762 {
5763 if (pIemCpu->uCpl <= uIopl)
5764 fEfl |= X86_EFL_IF;
5765 else if ( pIemCpu->uCpl == 3
5766 && (pCtx->cr4 & X86_CR4_PVI)
5767 && !(fEfl & X86_EFL_VIP) )
5768 fEfl |= X86_EFL_VIF;
5769 else
5770 return iemRaiseGeneralProtectionFault0(pIemCpu);
5771 }
5772 /* V8086 */
5773 else if (uIopl == 3)
5774 fEfl |= X86_EFL_IF;
5775 else if ( uIopl < 3
5776 && (pCtx->cr4 & X86_CR4_VME)
5777 && !(fEfl & X86_EFL_VIP) )
5778 fEfl |= X86_EFL_VIF;
5779 else
5780 return iemRaiseGeneralProtectionFault0(pIemCpu);
5781 }
5782 /* real mode */
5783 else
5784 fEfl |= X86_EFL_IF;
5785
5786 /* Commit. */
5787 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5788 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5789 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
5790 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
5791 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
5792 return VINF_SUCCESS;
5793}
5794
5795
5796/**
5797 * Implements 'HLT'.
5798 */
5799IEM_CIMPL_DEF_0(iemCImpl_hlt)
5800{
5801 if (pIemCpu->uCpl != 0)
5802 return iemRaiseGeneralProtectionFault0(pIemCpu);
5803 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5804 return VINF_EM_HALT;
5805}
5806
5807
5808/**
5809 * Implements 'MONITOR'.
5810 */
5811IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
5812{
5813 /*
5814 * Permission checks.
5815 */
5816 if (pIemCpu->uCpl != 0)
5817 {
5818 Log2(("monitor: CPL != 0\n"));
5819 return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
5820 }
5821 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5822 {
5823 Log2(("monitor: Not in CPUID\n"));
5824 return iemRaiseUndefinedOpcode(pIemCpu);
5825 }
5826
5827 /*
5828 * Gather the operands and validate them.
5829 */
5830 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5831 RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
5832 uint32_t uEcx = pCtx->ecx;
5833 uint32_t uEdx = pCtx->edx;
5834/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
5835 * \#GP first. */
5836 if (uEcx != 0)
5837 {
5838 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
5839 return iemRaiseGeneralProtectionFault0(pIemCpu);
5840 }
5841
5842 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
5843 if (rcStrict != VINF_SUCCESS)
5844 return rcStrict;
5845
5846 RTGCPHYS GCPhysMem;
5847 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
5848 if (rcStrict != VINF_SUCCESS)
5849 return rcStrict;
5850
5851 /*
5852 * Call EM to prepare the monitor/wait.
5853 */
5854 rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
5855 Assert(rcStrict == VINF_SUCCESS);
5856
5857 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5858 return rcStrict;
5859}
5860
5861
5862/**
5863 * Implements 'MWAIT'.
5864 */
5865IEM_CIMPL_DEF_0(iemCImpl_mwait)
5866{
5867 /*
5868 * Permission checks.
5869 */
5870 if (pIemCpu->uCpl != 0)
5871 {
5872 Log2(("mwait: CPL != 0\n"));
5873 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
5874 * EFLAGS.VM then.) */
5875 return iemRaiseUndefinedOpcode(pIemCpu);
5876 }
5877 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5878 {
5879 Log2(("mwait: Not in CPUID\n"));
5880 return iemRaiseUndefinedOpcode(pIemCpu);
5881 }
5882
5883 /*
5884 * Gather the operands and validate them.
5885 */
5886 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5887 uint32_t uEax = pCtx->eax;
5888 uint32_t uEcx = pCtx->ecx;
5889 if (uEcx != 0)
5890 {
5891 /* Only supported extension is break on IRQ when IF=0. */
5892 if (uEcx > 1)
5893 {
5894 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
5895 return iemRaiseGeneralProtectionFault0(pIemCpu);
5896 }
5897 uint32_t fMWaitFeatures = 0;
5898 uint32_t uIgnore = 0;
5899 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
5900 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5901 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5902 {
5903 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
5904 return iemRaiseGeneralProtectionFault0(pIemCpu);
5905 }
5906 }
5907
5908 /*
5909 * Call EM to prepare the monitor/wait.
5910 */
5911 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);
5912
5913 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5914 return rcStrict;
5915}
5916
5917
5918/**
5919 * Implements 'SWAPGS'.
5920 */
5921IEM_CIMPL_DEF_0(iemCImpl_swapgs)
5922{
5923 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
5924
5925 /*
5926 * Permission checks.
5927 */
5928 if (pIemCpu->uCpl != 0)
5929 {
5930 Log2(("swapgs: CPL != 0\n"));
5931 return iemRaiseUndefinedOpcode(pIemCpu);
5932 }
5933
5934 /*
5935 * Do the job.
5936 */
5937 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5938 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
5939 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
5940 pCtx->gs.u64Base = uOtherGsBase;
5941
5942 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5943 return VINF_SUCCESS;
5944}
5945
5946
5947/**
5948 * Implements 'CPUID'.
5949 */
5950IEM_CIMPL_DEF_0(iemCImpl_cpuid)
5951{
5952 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5953
5954 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
5955 pCtx->rax &= UINT32_C(0xffffffff);
5956 pCtx->rbx &= UINT32_C(0xffffffff);
5957 pCtx->rcx &= UINT32_C(0xffffffff);
5958 pCtx->rdx &= UINT32_C(0xffffffff);
5959
5960 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5961 return VINF_SUCCESS;
5962}
5963
5964
5965/**
5966 * Implements 'AAD'.
5967 *
5968 * @param bImm The immediate operand.
5969 */
5970IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
5971{
5972 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5973
5974 uint16_t const ax = pCtx->ax;
5975 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
5976 pCtx->ax = al;
5977 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5978 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5979 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5980
5981 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5982 return VINF_SUCCESS;
5983}
5984
5985
5986/**
5987 * Implements 'AAM'.
5988 *
5989 * @param bImm The immediate operand. Cannot be 0.
5990 */
5991IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
5992{
5993 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5994 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
5995
5996 uint16_t const ax = pCtx->ax;
5997 uint8_t const al = (uint8_t)ax % bImm;
5998 uint8_t const ah = (uint8_t)ax / bImm;
5999 pCtx->ax = (ah << 8) + al;
6000 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
6001 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6002 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6003
6004 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6005 return VINF_SUCCESS;
6006}
6007
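/*
 * Editor's note -- worked example, not part of the original source file: with
 * the default immediate 10, AAM splits AX=0x001B (27) into AH=27/10=2 and
 * AL=27%10=7, i.e. AX=0x0207, and AAD reverses it: AL=7+2*10=27 (0x1B), AH=0.
 * A minimal self-check of those two identities (helper name invented):
 */
#if 0 /* sketch */
static void iemSketchCheckAamAad(void)
{
    uint16_t const ax = 0x001b;                         /* 27 */
    uint8_t  const ah = (uint8_t)ax / 10;               /* 2 */
    uint8_t  const al = (uint8_t)ax % 10;               /* 7 */
    Assert((uint16_t)((ah << 8) | al) == 0x0207);       /* AAM 10 */
    Assert((uint8_t)(al + ah * 10) == 0x1b);            /* AAD 10 */
}
#endif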
6008
6009/**
6010 * Implements 'DAA'.
6011 */
6012IEM_CIMPL_DEF_0(iemCImpl_daa)
6013{
6014 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6015
6016 uint8_t const al = pCtx->al;
6017 bool const fCarry = pCtx->eflags.Bits.u1CF;
6018
6019 if ( pCtx->eflags.Bits.u1AF
6020 || (al & 0xf) >= 10)
6021 {
6022 pCtx->al = al + 6;
6023 pCtx->eflags.Bits.u1AF = 1;
6024 }
6025 else
6026 pCtx->eflags.Bits.u1AF = 0;
6027
6028 if (al >= 0x9a || fCarry)
6029 {
6030 pCtx->al += 0x60;
6031 pCtx->eflags.Bits.u1CF = 1;
6032 }
6033 else
6034 pCtx->eflags.Bits.u1CF = 0;
6035
6036 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6037 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6038 return VINF_SUCCESS;
6039}
6040
6041
6042/**
6043 * Implements 'DAS'.
6044 */
6045IEM_CIMPL_DEF_0(iemCImpl_das)
6046{
6047 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6048
6049 uint8_t const uInputAL = pCtx->al;
6050 bool const fCarry = pCtx->eflags.Bits.u1CF;
6051
6052 if ( pCtx->eflags.Bits.u1AF
6053 || (uInputAL & 0xf) >= 10)
6054 {
6055 pCtx->eflags.Bits.u1AF = 1;
6056 if (uInputAL < 6)
6057 pCtx->eflags.Bits.u1CF = 1;
6058 pCtx->al = uInputAL - 6;
6059 }
6060 else
6061 {
6062 pCtx->eflags.Bits.u1AF = 0;
6063 pCtx->eflags.Bits.u1CF = 0;
6064 }
6065
6066 if (uInputAL >= 0x9a || fCarry)
6067 {
6068 pCtx->al -= 0x60;
6069 pCtx->eflags.Bits.u1CF = 1;
6070 }
6071
6072 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6073 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6074 return VINF_SUCCESS;
6075}
6076
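/*
 * Editor's note -- worked example, not part of the original source file: after
 * adding packed BCD 0x19 and 0x28, AL holds 0x41 with AF set.  DAA then adds 6
 * because AF is set, giving AL=0x47, and leaves CF clear since the original AL
 * (0x41) is below 0x9A -- i.e. the correct BCD sum 47.  A minimal self-check
 * of that case (helper name invented):
 */
#if 0 /* sketch */
static void iemSketchCheckDaa(void)
{
    uint8_t       al  = 0x19 + 0x28;    /* 0x41; the add sets AF (9+8 > 15). */
    bool    const fAF = true;
    bool    const fCF = false;
    if (fAF || (al & 0xf) >= 10)
        al += 6;                        /* low nibble adjustment -> 0x47 */
    if (0x41 >= 0x9a || fCF)
        al += 0x60;                     /* not taken in this example */
    Assert(al == 0x47);
}
#endif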
6077
6078
6079
6080/*
6081 * Instantiate the various string operation combinations.
6082 */
6083#define OP_SIZE 8
6084#define ADDR_SIZE 16
6085#include "IEMAllCImplStrInstr.cpp.h"
6086#define OP_SIZE 8
6087#define ADDR_SIZE 32
6088#include "IEMAllCImplStrInstr.cpp.h"
6089#define OP_SIZE 8
6090#define ADDR_SIZE 64
6091#include "IEMAllCImplStrInstr.cpp.h"
6092
6093#define OP_SIZE 16
6094#define ADDR_SIZE 16
6095#include "IEMAllCImplStrInstr.cpp.h"
6096#define OP_SIZE 16
6097#define ADDR_SIZE 32
6098#include "IEMAllCImplStrInstr.cpp.h"
6099#define OP_SIZE 16
6100#define ADDR_SIZE 64
6101#include "IEMAllCImplStrInstr.cpp.h"
6102
6103#define OP_SIZE 32
6104#define ADDR_SIZE 16
6105#include "IEMAllCImplStrInstr.cpp.h"
6106#define OP_SIZE 32
6107#define ADDR_SIZE 32
6108#include "IEMAllCImplStrInstr.cpp.h"
6109#define OP_SIZE 32
6110#define ADDR_SIZE 64
6111#include "IEMAllCImplStrInstr.cpp.h"
6112
6113#define OP_SIZE 64
6114#define ADDR_SIZE 32
6115#include "IEMAllCImplStrInstr.cpp.h"
6116#define OP_SIZE 64
6117#define ADDR_SIZE 64
6118#include "IEMAllCImplStrInstr.cpp.h"
6119
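/*
 * Editor's note -- illustrative sketch, not part of the original source file:
 * the #define/#include pairs above are the usual "template by preprocessor"
 * technique.  The included file expands OP_SIZE and ADDR_SIZE into
 * size-specific function names and types, and #undefs both macros at the end
 * so the next instantiation can redefine them.  The real
 * IEMAllCImplStrInstr.cpp.h is far more involved; the hypothetical, heavily
 * simplified stand-in below only demonstrates the mechanism (all names are
 * invented).
 */
#if 0 /* sketch of a hypothetical template include */
/* SketchStrTmpl.cpp.h -- included once per OP_SIZE/ADDR_SIZE combination. */
#define SKETCH_FN(a_Name) RT_CONCAT4(a_Name, OP_SIZE, _a, ADDR_SIZE)
static void SKETCH_FN(iemSketchStosOp)(void *pvDst, uint64_t uValue, size_t cReps)
{
    /* A size-specific store loop would be generated here for each
       OP_SIZE/ADDR_SIZE combination, e.g. iemSketchStosOp32_a64(). */
    NOREF(pvDst); NOREF(uValue); NOREF(cReps);
}
#undef SKETCH_FN
#undef OP_SIZE
#undef ADDR_SIZE
#endif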
6120
6121/**
6122 * Implements 'XGETBV'.
6123 */
6124IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
6125{
6126 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6127 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6128 {
6129 uint32_t uEcx = pCtx->ecx;
6130 switch (uEcx)
6131 {
6132 case 0:
6133 break;
6134
6135 case 1: /** @todo Implement XCR1 support. */
6136 default:
6137 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
6138 return iemRaiseGeneralProtectionFault0(pIemCpu);
6139
6140 }
6141 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
6142 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
6143
6144 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6145 return VINF_SUCCESS;
6146 }
6147 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
6148 return iemRaiseUndefinedOpcode(pIemCpu);
6149}
6150
6151
6152/**
6153 * Implements 'XSETBV'.
6154 */
6155IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
6156{
6157 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6158 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6159 {
6160 if (pIemCpu->uCpl == 0)
6161 {
6162 uint32_t uEcx = pCtx->ecx;
6163 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
6164 switch (uEcx)
6165 {
6166 case 0:
6167 {
6168 int rc = CPUMSetGuestXcr0(IEMCPU_TO_VMCPU(pIemCpu), uNewValue);
6169 if (rc == VINF_SUCCESS)
6170 break;
6171 Assert(rc == VERR_CPUM_RAISE_GP_0);
6172 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6173 return iemRaiseGeneralProtectionFault0(pIemCpu);
6174 }
6175
6176 case 1: /** @todo Implement XCR1 support. */
6177 default:
6178 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6179 return iemRaiseGeneralProtectionFault0(pIemCpu);
6180
6181 }
6182
6183 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6184 return VINF_SUCCESS;
6185 }
6186
6187 Log(("xsetbv cpl=%u -> GP(0)\n", pIemCpu->uCpl));
6188 return iemRaiseGeneralProtectionFault0(pIemCpu);
6189 }
6190 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
6191 return iemRaiseUndefinedOpcode(pIemCpu);
6192}
6193
6194
6195
6196/**
6197 * Implements 'FINIT' and 'FNINIT'.
6198 *
 6199 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
6200 * not.
6201 */
6202IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
6203{
6204 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6205
6206 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6207 return iemRaiseDeviceNotAvailable(pIemCpu);
6208
6209 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
6210 if (fCheckXcpts && TODO )
6211 return iemRaiseMathFault(pIemCpu);
6212 */
6213
6214 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
6215 pXState->x87.FCW = 0x37f;
6216 pXState->x87.FSW = 0;
6217 pXState->x87.FTW = 0x00; /* 0 - empty. */
6218 pXState->x87.FPUDP = 0;
6219 pXState->x87.DS = 0; //??
6220 pXState->x87.Rsrvd2= 0;
6221 pXState->x87.FPUIP = 0;
6222 pXState->x87.CS = 0; //??
6223 pXState->x87.Rsrvd1= 0;
6224 pXState->x87.FOP = 0;
6225
6226 iemHlpUsedFpu(pIemCpu);
6227 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6228 return VINF_SUCCESS;
6229}
6230
6231
6232/**
6233 * Implements 'FXSAVE'.
6234 *
6235 * @param iEffSeg The effective segment.
6236 * @param GCPtrEff The address of the image.
6237 * @param enmEffOpSize The operand size (only REX.W really matters).
6238 */
6239IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6240{
6241 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6242
6243 /*
6244 * Raise exceptions.
6245 */
6246 if (pCtx->cr0 & X86_CR0_EM)
6247 return iemRaiseUndefinedOpcode(pIemCpu);
6248 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6249 return iemRaiseDeviceNotAvailable(pIemCpu);
6250 if (GCPtrEff & 15)
6251 {
 6252 /** @todo CPU/VM detection possible! \#AC might not be signalled for
 6253 * all/any misalignment sizes, Intel says it's an implementation detail. */
6254 if ( (pCtx->cr0 & X86_CR0_AM)
6255 && pCtx->eflags.Bits.u1AC
6256 && pIemCpu->uCpl == 3)
6257 return iemRaiseAlignmentCheckException(pIemCpu);
6258 return iemRaiseGeneralProtectionFault0(pIemCpu);
6259 }
6260
6261 /*
6262 * Access the memory.
6263 */
6264 void *pvMem512;
6265 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6266 if (rcStrict != VINF_SUCCESS)
6267 return rcStrict;
6268 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
6269 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
6270
6271 /*
6272 * Store the registers.
6273 */
 6274 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
 6275 * specific whether MXCSR and XMM0-XMM7 are saved. */
6276
6277 /* common for all formats */
6278 pDst->FCW = pSrc->FCW;
6279 pDst->FSW = pSrc->FSW;
6280 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6281 pDst->FOP = pSrc->FOP;
6282 pDst->MXCSR = pSrc->MXCSR;
6283 pDst->MXCSR_MASK = pSrc->MXCSR_MASK;
6284 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
6285 {
6286 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
6287 * them for now... */
6288 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6289 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6290 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6291 pDst->aRegs[i].au32[3] = 0;
6292 }
6293
6294 /* FPU IP, CS, DP and DS. */
6295 pDst->FPUIP = pSrc->FPUIP;
6296 pDst->CS = pSrc->CS;
6297 pDst->FPUDP = pSrc->FPUDP;
6298 pDst->DS = pSrc->DS;
6299 if (enmEffOpSize == IEMMODE_64BIT)
6300 {
6301 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
6302 pDst->Rsrvd1 = pSrc->Rsrvd1;
6303 pDst->Rsrvd2 = pSrc->Rsrvd2;
6304 pDst->au32RsrvdForSoftware[0] = 0;
6305 }
6306 else
6307 {
6308 pDst->Rsrvd1 = 0;
6309 pDst->Rsrvd2 = 0;
6310 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
6311 }
6312
6313 /* XMM registers. */
6314 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6315 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6316 || pIemCpu->uCpl != 0)
6317 {
6318 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6319 for (uint32_t i = 0; i < cXmmRegs; i++)
6320 pDst->aXMM[i] = pSrc->aXMM[i];
6321 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
6322 * right? */
6323 }
6324
6325 /*
6326 * Commit the memory.
6327 */
6328 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6329 if (rcStrict != VINF_SUCCESS)
6330 return rcStrict;
6331
6332 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6333 return VINF_SUCCESS;
6334}
6335
6336
6337/**
6338 * Implements 'FXRSTOR'.
6339 *
6340 * @param GCPtrEff The address of the image.
6341 * @param enmEffOpSize The operand size (only REX.W really matters).
6342 */
6343IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6344{
6345 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6346
6347 /*
6348 * Raise exceptions.
6349 */
6350 if (pCtx->cr0 & X86_CR0_EM)
6351 return iemRaiseUndefinedOpcode(pIemCpu);
6352 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6353 return iemRaiseDeviceNotAvailable(pIemCpu);
6354 if (GCPtrEff & 15)
6355 {
 6356 /** @todo CPU/VM detection possible! \#AC might not be signalled for
 6357 * all/any misalignment sizes, Intel says it's an implementation detail. */
6358 if ( (pCtx->cr0 & X86_CR0_AM)
6359 && pCtx->eflags.Bits.u1AC
6360 && pIemCpu->uCpl == 3)
6361 return iemRaiseAlignmentCheckException(pIemCpu);
6362 return iemRaiseGeneralProtectionFault0(pIemCpu);
6363 }
6364
6365 /*
6366 * Access the memory.
6367 */
6368 void *pvMem512;
6369 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
6370 if (rcStrict != VINF_SUCCESS)
6371 return rcStrict;
6372 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
6373 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
6374
6375 /*
6376 * Check the state for stuff which will #GP(0).
6377 */
6378 uint32_t const fMXCSR = pSrc->MXCSR;
6379 uint32_t const fMXCSR_MASK = pDst->MXCSR_MASK ? pDst->MXCSR_MASK : UINT32_C(0xffbf);
6380 if (fMXCSR & ~fMXCSR_MASK)
6381 {
6382 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
6383 return iemRaiseGeneralProtectionFault0(pIemCpu);
6384 }
6385
6386 /*
6387 * Load the registers.
6388 */
 6389 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
 6390 * specific whether MXCSR and XMM0-XMM7 are restored. */
6391
6392 /* common for all formats */
6393 pDst->FCW = pSrc->FCW;
6394 pDst->FSW = pSrc->FSW;
6395 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6396 pDst->FOP = pSrc->FOP;
6397 pDst->MXCSR = fMXCSR;
6398 /* (MXCSR_MASK is read-only) */
6399 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
6400 {
6401 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6402 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6403 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6404 pDst->aRegs[i].au32[3] = 0;
6405 }
6406
6407 /* FPU IP, CS, DP and DS. */
6408 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6409 {
6410 pDst->FPUIP = pSrc->FPUIP;
6411 pDst->CS = pSrc->CS;
6412 pDst->Rsrvd1 = pSrc->Rsrvd1;
6413 pDst->FPUDP = pSrc->FPUDP;
6414 pDst->DS = pSrc->DS;
6415 pDst->Rsrvd2 = pSrc->Rsrvd2;
6416 }
6417 else
6418 {
6419 pDst->FPUIP = pSrc->FPUIP;
6420 pDst->CS = pSrc->CS;
6421 pDst->Rsrvd1 = 0;
6422 pDst->FPUDP = pSrc->FPUDP;
6423 pDst->DS = pSrc->DS;
6424 pDst->Rsrvd2 = 0;
6425 }
6426
6427 /* XMM registers. */
6428 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6429 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6430 || pIemCpu->uCpl != 0)
6431 {
6432 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6433 for (uint32_t i = 0; i < cXmmRegs; i++)
6434 pDst->aXMM[i] = pSrc->aXMM[i];
6435 }
6436
6437 /*
6438 * Commit the memory.
6439 */
6440 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
6441 if (rcStrict != VINF_SUCCESS)
6442 return rcStrict;
6443
6444 iemHlpUsedFpu(pIemCpu);
6445 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6446 return VINF_SUCCESS;
6447}
6448
6449
6450/**
 6451 * Common routine for fnstenv and fnsave.
6452 *
6453 * @param uPtr Where to store the state.
6454 * @param pCtx The CPU context.
6455 */
6456static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
6457{
6458 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
6459 if (enmEffOpSize == IEMMODE_16BIT)
6460 {
6461 uPtr.pu16[0] = pSrcX87->FCW;
6462 uPtr.pu16[1] = pSrcX87->FSW;
6463 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
6464 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6465 {
6466 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
6467 * protected mode or long mode and we save it in real mode? And vice
 6468 * versa? And with 32-bit operand size? I think the CPU is storing the
6469 * effective address ((CS << 4) + IP) in the offset register and not
6470 * doing any address calculations here. */
6471 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
6472 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
6473 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
6474 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
6475 }
6476 else
6477 {
6478 uPtr.pu16[3] = pSrcX87->FPUIP;
6479 uPtr.pu16[4] = pSrcX87->CS;
6480 uPtr.pu16[5] = pSrcX87->FPUDP;
6481 uPtr.pu16[6] = pSrcX87->DS;
6482 }
6483 }
6484 else
6485 {
6486 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
6487 uPtr.pu16[0*2] = pSrcX87->FCW;
6488 uPtr.pu16[1*2] = pSrcX87->FSW;
6489 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
6490 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6491 {
6492 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
6493 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
6494 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
6495 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
6496 }
6497 else
6498 {
6499 uPtr.pu32[3] = pSrcX87->FPUIP;
6500 uPtr.pu16[4*2] = pSrcX87->CS;
6501 uPtr.pu16[4*2+1]= pSrcX87->FOP;
6502 uPtr.pu32[5] = pSrcX87->FPUDP;
6503 uPtr.pu16[6*2] = pSrcX87->DS;
6504 }
6505 }
6506}
6507
6508
6509/**
 6510 * Common routine for fldenv and frstor.
 6511 *
 6512 * @param uPtr Where to load the state from.
6513 * @param pCtx The CPU context.
6514 */
6515static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
6516{
6517 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
6518 if (enmEffOpSize == IEMMODE_16BIT)
6519 {
6520 pDstX87->FCW = uPtr.pu16[0];
6521 pDstX87->FSW = uPtr.pu16[1];
6522 pDstX87->FTW = uPtr.pu16[2];
6523 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6524 {
6525 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
6526 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
6527 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
6528 pDstX87->CS = 0;
6529 pDstX87->Rsrvd1= 0;
6530 pDstX87->DS = 0;
6531 pDstX87->Rsrvd2= 0;
6532 }
6533 else
6534 {
6535 pDstX87->FPUIP = uPtr.pu16[3];
6536 pDstX87->CS = uPtr.pu16[4];
6537 pDstX87->Rsrvd1= 0;
6538 pDstX87->FPUDP = uPtr.pu16[5];
6539 pDstX87->DS = uPtr.pu16[6];
6540 pDstX87->Rsrvd2= 0;
6541 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
6542 }
6543 }
6544 else
6545 {
6546 pDstX87->FCW = uPtr.pu16[0*2];
6547 pDstX87->FSW = uPtr.pu16[1*2];
6548 pDstX87->FTW = uPtr.pu16[2*2];
6549 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6550 {
6551 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
6552 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
6553 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
6554 pDstX87->CS = 0;
6555 pDstX87->Rsrvd1= 0;
6556 pDstX87->DS = 0;
6557 pDstX87->Rsrvd2= 0;
6558 }
6559 else
6560 {
6561 pDstX87->FPUIP = uPtr.pu32[3];
6562 pDstX87->CS = uPtr.pu16[4*2];
6563 pDstX87->Rsrvd1= 0;
6564 pDstX87->FOP = uPtr.pu16[4*2+1];
6565 pDstX87->FPUDP = uPtr.pu32[5];
6566 pDstX87->DS = uPtr.pu16[6*2];
6567 pDstX87->Rsrvd2= 0;
6568 }
6569 }
6570
6571 /* Make adjustments. */
6572 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
6573 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
6574 iemFpuRecalcExceptionStatus(pDstX87);
6575 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
6576 * exceptions are pending after loading the saved state? */
6577}
6578
6579
6580/**
6581 * Implements 'FNSTENV'.
6582 *
6583 * @param enmEffOpSize The operand size (only REX.W really matters).
6584 * @param iEffSeg The effective segment register for @a GCPtrEff.
6585 * @param GCPtrEffDst The address of the image.
6586 */
6587IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6588{
6589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6590 RTPTRUNION uPtr;
6591 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6592 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6593 if (rcStrict != VINF_SUCCESS)
6594 return rcStrict;
6595
6596 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6597
6598 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6599 if (rcStrict != VINF_SUCCESS)
6600 return rcStrict;
6601
6602 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6603 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6604 return VINF_SUCCESS;
6605}
6606
6607
6608/**
6609 * Implements 'FNSAVE'.
6610 *
6611 * @param GCPtrEffDst The address of the image.
6612 * @param enmEffOpSize The operand size.
6613 */
6614IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6615{
6616 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6617 RTPTRUNION uPtr;
6618 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6619 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6620 if (rcStrict != VINF_SUCCESS)
6621 return rcStrict;
6622
6623 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6624 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6625 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6626 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6627 {
6628 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
6629 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
6630 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
6631 }
6632
6633 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6634 if (rcStrict != VINF_SUCCESS)
6635 return rcStrict;
6636
6637 /*
6638 * Re-initialize the FPU context.
6639 */
6640 pFpuCtx->FCW = 0x37f;
6641 pFpuCtx->FSW = 0;
6642 pFpuCtx->FTW = 0x00; /* 0 - empty */
6643 pFpuCtx->FPUDP = 0;
6644 pFpuCtx->DS = 0;
6645 pFpuCtx->Rsrvd2= 0;
6646 pFpuCtx->FPUIP = 0;
6647 pFpuCtx->CS = 0;
6648 pFpuCtx->Rsrvd1= 0;
6649 pFpuCtx->FOP = 0;
6650
6651 iemHlpUsedFpu(pIemCpu);
6652 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6653 return VINF_SUCCESS;
6654}
6655
6656
6657
6658/**
6659 * Implements 'FLDENV'.
6660 *
6661 * @param enmEffOpSize The operand size (only REX.W really matters).
6662 * @param iEffSeg The effective segment register for @a GCPtrEff.
6663 * @param GCPtrEffSrc The address of the image.
6664 */
6665IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6666{
6667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6668 RTCPTRUNION uPtr;
6669 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6670 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6671 if (rcStrict != VINF_SUCCESS)
6672 return rcStrict;
6673
6674 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6675
6676 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6677 if (rcStrict != VINF_SUCCESS)
6678 return rcStrict;
6679
6680 iemHlpUsedFpu(pIemCpu);
6681 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6682 return VINF_SUCCESS;
6683}
6684
6685
6686/**
6687 * Implements 'FRSTOR'.
6688 *
6689 * @param GCPtrEffSrc The address of the image.
6690 * @param enmEffOpSize The operand size.
6691 */
6692IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6693{
6694 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6695 RTCPTRUNION uPtr;
6696 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6697 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6698 if (rcStrict != VINF_SUCCESS)
6699 return rcStrict;
6700
6701 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6702 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6703 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6704 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6705 {
6706 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
6707 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
6708 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
6709 pFpuCtx->aRegs[i].au32[3] = 0;
6710 }
6711
6712 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6713 if (rcStrict != VINF_SUCCESS)
6714 return rcStrict;
6715
6716 iemHlpUsedFpu(pIemCpu);
6717 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6718 return VINF_SUCCESS;
6719}
6720
6721
6722/**
6723 * Implements 'FLDCW'.
6724 *
6725 * @param u16Fcw The new FCW.
6726 */
6727IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
6728{
6729 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6730
6731 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
 6732 /** @todo Testcase: Try to see what happens when trying to set undefined bits
6733 * (other than 6 and 7). Currently ignoring them. */
 6734 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
 6735 * according to FSW. (This is what is currently implemented.) */
6736 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6737 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
6738 iemFpuRecalcExceptionStatus(pFpuCtx);
6739
6740 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6741 iemHlpUsedFpu(pIemCpu);
6742 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6743 return VINF_SUCCESS;
6744}
6745
6746
6747
6748/**
6749 * Implements the underflow case of fxch.
6750 *
6751 * @param iStReg The other stack register.
6752 */
6753IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
6754{
6755 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6756
6757 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6758 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
6759 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6760 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
6761
6762 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
6763 * registers are read as QNaN and then exchanged. This could be
6764 * wrong... */
6765 if (pFpuCtx->FCW & X86_FCW_IM)
6766 {
6767 if (RT_BIT(iReg1) & pFpuCtx->FTW)
6768 {
6769 if (RT_BIT(iReg2) & pFpuCtx->FTW)
6770 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6771 else
6772 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
6773 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6774 }
6775 else
6776 {
6777 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
6778 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6779 }
6780 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6781 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6782 }
6783 else
6784 {
6785 /* raise underflow exception, don't change anything. */
6786 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
6787 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6788 }
6789
6790 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6791 iemHlpUsedFpu(pIemCpu);
6792 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6793 return VINF_SUCCESS;
6794}
6795
6796
6797/**
6798 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
6799 *
 6800 * @param iStReg The other stack register.
6801 */
6802IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
6803{
6804 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6805 Assert(iStReg < 8);
6806
6807 /*
6808 * Raise exceptions.
6809 */
6810 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6811 return iemRaiseDeviceNotAvailable(pIemCpu);
6812
6813 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6814 uint16_t u16Fsw = pFpuCtx->FSW;
6815 if (u16Fsw & X86_FSW_ES)
6816 return iemRaiseMathFault(pIemCpu);
6817
6818 /*
6819 * Check if any of the register accesses causes #SF + #IA.
6820 */
6821 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
6822 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6823 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
6824 {
6825 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
6827
6828 pFpuCtx->FSW &= ~X86_FSW_C1;
6829 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
6830 if ( !(u16Fsw & X86_FSW_IE)
6831 || (pFpuCtx->FCW & X86_FCW_IM) )
6832 {
6833 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
 6834 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6835 }
6836 }
6837 else if (pFpuCtx->FCW & X86_FCW_IM)
6838 {
6839 /* Masked underflow. */
6840 pFpuCtx->FSW &= ~X86_FSW_C1;
6841 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6842 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6843 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
6844 }
6845 else
6846 {
6847 /* Raise underflow - don't touch EFLAGS or TOP. */
6848 pFpuCtx->FSW &= ~X86_FSW_C1;
6849 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6850 fPop = false;
6851 }
6852
6853 /*
6854 * Pop if necessary.
6855 */
6856 if (fPop)
6857 {
6858 pFpuCtx->FTW &= ~RT_BIT(iReg1);
6859 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
6860 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
6861 }
6862
6863 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6864 iemHlpUsedFpu(pIemCpu);
6865 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6866 return VINF_SUCCESS;
6867}
6868
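/*
 * Editor's note -- illustrative sketch, not part of the original source file:
 * FCOMI/FUCOMI report the comparison result directly in EFLAGS instead of in
 * the FPU condition codes:
 *      ST(0) >  ST(i):  ZF=0, PF=0, CF=0
 *      ST(0) <  ST(i):  ZF=0, PF=0, CF=1
 *      ST(0) == ST(i):  ZF=1, PF=0, CF=0
 *      unordered:       ZF=1, PF=1, CF=1
 * which is why only ZF, PF and CF are taken from the comparison result above.
 * A table-style restatement of that mapping (function name invented):
 */
#if 0 /* sketch */
static uint32_t iemSketchFcomiResultToEfl(int iResult /* -1, 0, 1; 2 = unordered */)
{
    switch (iResult)
    {
        case  1: return 0;                                      /* greater */
        case -1: return X86_EFL_CF;                             /* less */
        case  0: return X86_EFL_ZF;                             /* equal */
        default: return X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;   /* unordered */
    }
}
#endif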
6869/** @} */
6870