source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 61031

1/* $Id: IEMAllCImpl.cpp.h 61031 2016-05-18 11:12:00Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23
24/**
25 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
26 *
27 * @returns Strict VBox status code.
28 *
29 * @param pIemCpu The IEM per CPU data.
30 * @param pCtx The register context.
31 * @param u16Port The port number.
32 * @param cbOperand The operand size.
33 */
34static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
35{
36 /* The TSS bits we're interested in are the same on 386 and AMD64. */
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
38 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
39 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
40 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
41
42 /*
43 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
44 */
45 Assert(!pCtx->tr.Attr.n.u1DescType);
46 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
47 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
48 {
49 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
50 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
51 return iemRaiseGeneralProtectionFault0(pIemCpu);
52 }
53
54 /*
55 * Read the bitmap offset (may #PF).
56 */
57 uint16_t offBitmap;
58 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offBitmap, UINT8_MAX,
59 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
60 if (rcStrict != VINF_SUCCESS)
61 {
62 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
63 return rcStrict;
64 }
65
66 /*
67 * We want the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
68 * describes the CPU as always reading two bytes regardless of whether the
69 * bit range crosses a byte boundary. Thus the + 1 in the test below.
70 */
71 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
72 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
73 * for instance, sizeof(X86TSS32). */
74 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
75 {
76 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
77 offFirstBit, pCtx->tr.u32Limit));
78 return iemRaiseGeneralProtectionFault0(pIemCpu);
79 }
80
81 /*
82 * Read the necessary bits.
83 */
84 /** @todo Test the assertion in the Intel manual that the CPU reads two
85 * bytes. The question is how this works wrt #PF and #GP on the
86 * 2nd byte when it's not required. */
87 uint16_t bmBytes = UINT16_MAX;
88 rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
89 if (rcStrict != VINF_SUCCESS)
90 {
91 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
92 return rcStrict;
93 }
94
95 /*
96 * Perform the check.
97 */
98 uint16_t fPortMask = (1 << cbOperand) - 1;
99 bmBytes >>= (u16Port & 7);
100 if (bmBytes & fPortMask)
101 {
102 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
103 u16Port, cbOperand, bmBytes, fPortMask));
104 return iemRaiseGeneralProtectionFault0(pIemCpu);
105 }
106
107 return VINF_SUCCESS;
108}
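/*
 * Worked example of the bitmap lookup above (values for illustration only):
 * an access to port 0x3FF with cbOperand = 2 covers bits 0x3FF and 0x400,
 * which straddle a byte boundary.  offFirstBit = 0x3FF / 8 + offBitmap =
 * 0x7F + offBitmap, fPortMask = (1 << 2) - 1 = 3, and the shift count is
 * 0x3FF & 7 = 7, so the test inspects bit 7 of the first byte fetched and
 * bit 0 of the second - which is why two bytes are read and why the limit
 * check uses offFirstBit + 1.
 */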
109
110
111/**
112 * Checks if we are allowed to access the given I/O port, raising the
113 * appropriate exceptions if we aren't (or if the I/O bitmap is not
114 * accessible).
115 *
116 * @returns Strict VBox status code.
117 *
118 * @param pIemCpu The IEM per CPU data.
119 * @param pCtx The register context.
120 * @param u16Port The port number.
121 * @param cbOperand The operand size.
122 */
123DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
124{
125 X86EFLAGS Efl;
126 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
127 if ( (pCtx->cr0 & X86_CR0_PE)
128 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
129 || Efl.Bits.u1VM) )
130 return iemHlpCheckPortIOPermissionBitmap(pIemCpu, pCtx, u16Port, cbOperand);
131 return VINF_SUCCESS;
132}
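/*
 * Summary of the gate above: the TSS I/O bitmap is only consulted when CR0.PE
 * is set and either CPL > IOPL or we're in V8086 mode.  Real mode, and
 * protected mode with CPL <= IOPL outside V8086, never touch the bitmap;
 * e.g. ring-3 guest code running with IOPL=3 has its port access granted
 * here directly.
 */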
133
134
135#if 0
136/**
137 * Calculates the parity bit.
138 *
139 * @returns true if the bit is set, false if not.
140 * @param u8Result The least significant byte of the result.
141 */
142static bool iemHlpCalcParityFlag(uint8_t u8Result)
143{
144 /*
145 * Parity is set if the number of bits in the least significant byte of
146 * the result is even.
147 */
148 uint8_t cBits;
149 cBits = u8Result & 1; /* 0 */
150 u8Result >>= 1;
151 cBits += u8Result & 1;
152 u8Result >>= 1;
153 cBits += u8Result & 1;
154 u8Result >>= 1;
155 cBits += u8Result & 1;
156 u8Result >>= 1;
157 cBits += u8Result & 1; /* 4 */
158 u8Result >>= 1;
159 cBits += u8Result & 1;
160 u8Result >>= 1;
161 cBits += u8Result & 1;
162 u8Result >>= 1;
163 cBits += u8Result & 1;
164 return !(cBits & 1);
165}
166#endif /* not used */
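/*
 * Equivalent (unused) alternative: PF is set when the low byte of the result
 * has an even number of 1 bits, which can also be computed by XOR-folding:
 *     u8Result ^= u8Result >> 4;
 *     u8Result ^= u8Result >> 2;
 *     u8Result ^= u8Result >> 1;
 *     return !(u8Result & 1);
 * After the folds bit 0 holds the XOR of all eight bits, i.e. 1 for an odd
 * population count.
 */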
167
168
169/**
170 * Updates the specified flags according to an 8-bit result.
171 *
172 * @param pIemCpu The IEM state of the calling EMT.
173 * @param u8Result The result to set the flags according to.
174 * @param fToUpdate The flags to update.
175 * @param fUndefined The flags that are specified as undefined.
176 */
177static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
178{
179 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
180
181 uint32_t fEFlags = pCtx->eflags.u;
182 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
183 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
184 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
185#ifdef IEM_VERIFICATION_MODE_FULL
186 pIemCpu->fUndefinedEFlags |= fUndefined;
187#endif
188}
189
190
191/**
192 * Helper used by iret.
193 *
194 * @param uCpl The new CPL.
195 * @param pSReg Pointer to the segment register.
196 */
197static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
198{
199#ifdef VBOX_WITH_RAW_MODE_NOT_R0
200 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
201 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
202#else
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
204#endif
205
206 if ( uCpl > pSReg->Attr.n.u2Dpl
207 && pSReg->Attr.n.u1DescType /* code or data, not system */
208 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
210 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, 0);
211}
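/*
 * Example of the rule above: an iret that drops from CPL 0 to CPL 3 while ES
 * still holds a DPL-0 data selector will have ES replaced by a null selector
 * here, so the less privileged code cannot keep using the privileged segment.
 * Conforming code segments are deliberately left alone.
 */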
212
213
214/**
215 * Indicates that we have modified the FPU state.
216 *
217 * @param pIemCpu The IEM state of the calling EMT.
218 */
219DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
220{
221 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
222}
223
224/** @} */
225
226/** @name C Implementations
227 * @{
228 */
229
230/**
231 * Implements a 16-bit popa.
232 */
233IEM_CIMPL_DEF_0(iemCImpl_popa_16)
234{
235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
236 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
237 RTGCPTR GCPtrLast = GCPtrStart + 15;
238 VBOXSTRICTRC rcStrict;
239
240 /*
241 * The docs are a bit hard to comprehend here, but it looks like we wrap
242 * around in real mode as long as none of the individual "popa" crosses the
243 * end of the stack segment. In protected mode we check the whole access
244 * in one go. For efficiency, only do the word-by-word thing if we're in
245 * danger of wrapping around.
246 */
247 /** @todo do popa boundary / wrap-around checks. */
248 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
249 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
250 {
251 /* word-by-word */
252 RTUINT64U TmpRsp;
253 TmpRsp.u = pCtx->rsp;
254 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 {
261 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
262 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
263 }
264 if (rcStrict == VINF_SUCCESS)
265 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
266 if (rcStrict == VINF_SUCCESS)
267 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 {
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint16_t const *pa16Mem = NULL;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
283 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
284 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
285 /* skip sp */
286 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
287 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
288 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
289 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pIemCpu, pCtx, 16);
294 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
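/*
 * Note on the indexing above: PUSHA stores AX, CX, DX, BX, SP, BP, SI, DI at
 * decreasing addresses, so in memory (lowest address first) the frame reads
 * DI, SI, BP, SP, BX, DX, CX, AX.  Assuming the usual register numbering
 * (X86_GREG_xAX = 0 ... X86_GREG_xDI = 7), element [7 - X86_GREG_xXX] of the
 * mapped block therefore picks out register XX; the same scheme is used by
 * the 32-bit popa and both pusha implementations below.
 */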
300
301
302/**
303 * Implements a 32-bit popa.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_popa_32)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
309 RTGCPTR GCPtrLast = GCPtrStart + 31;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual "popa" crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do popa boundary / wrap-around checks. */
320 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
321 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 {
333 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
334 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
335 }
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 {
344#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
345 pCtx->rdi &= UINT32_MAX;
346 pCtx->rsi &= UINT32_MAX;
347 pCtx->rbp &= UINT32_MAX;
348 pCtx->rbx &= UINT32_MAX;
349 pCtx->rdx &= UINT32_MAX;
350 pCtx->rcx &= UINT32_MAX;
351 pCtx->rax &= UINT32_MAX;
352#endif
353 pCtx->rsp = TmpRsp.u;
354 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
355 }
356 }
357 else
358 {
359 uint32_t const *pa32Mem;
360 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
361 if (rcStrict == VINF_SUCCESS)
362 {
363 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
364 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
365 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
366 /* skip esp */
367 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
368 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
369 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
370 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
371 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
372 if (rcStrict == VINF_SUCCESS)
373 {
374 iemRegAddToRsp(pIemCpu, pCtx, 32);
375 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
376 }
377 }
378 }
379 return rcStrict;
380}
381
382
383/**
384 * Implements a 16-bit pusha.
385 */
386IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
387{
388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
389 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
390 RTGCPTR GCPtrBottom = GCPtrTop - 15;
391 VBOXSTRICTRC rcStrict;
392
393 /*
394 * The docs are a bit hard to comprehend here, but it looks like we wrap
395 * around in real mode as long as none of the individual "pusha" crosses the
396 * end of the stack segment. In protected mode we check the whole access
397 * in one go. For efficiency, only do the word-by-word thing if we're in
398 * danger of wrapping around.
399 */
400 /** @todo do pusha boundary / wrap-around checks. */
401 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
402 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
403 {
404 /* word-by-word */
405 RTUINT64U TmpRsp;
406 TmpRsp.u = pCtx->rsp;
407 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 {
424 pCtx->rsp = TmpRsp.u;
425 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
426 }
427 }
428 else
429 {
430 GCPtrBottom--;
431 uint16_t *pa16Mem = NULL;
432 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
433 if (rcStrict == VINF_SUCCESS)
434 {
435 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
436 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
437 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
438 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
439 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
440 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
441 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
442 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
443 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 iemRegSubFromRsp(pIemCpu, pCtx, 16);
447 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
448 }
449 }
450 }
451 return rcStrict;
452}
453
454
455/**
456 * Implements a 32-bit pusha.
457 */
458IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
459{
460 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
461 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
462 RTGCPTR GCPtrBottom = GCPtrTop - 31;
463 VBOXSTRICTRC rcStrict;
464
465 /*
466 * The docs are a bit hard to comprehend here, but it looks like we wrap
467 * around in real mode as long as none of the individual "pusha" crosses the
468 * end of the stack segment. In protected mode we check the whole access
469 * in one go. For efficiency, only do the word-by-word thing if we're in
470 * danger of wrapping around.
471 */
472 /** @todo do pusha boundary / wrap-around checks. */
473 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
474 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
475 {
476 /* word-by-word */
477 RTUINT64U TmpRsp;
478 TmpRsp.u = pCtx->rsp;
479 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
480 if (rcStrict == VINF_SUCCESS)
481 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
482 if (rcStrict == VINF_SUCCESS)
483 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 {
496 pCtx->rsp = TmpRsp.u;
497 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
498 }
499 }
500 else
501 {
502 GCPtrBottom--;
503 uint32_t *pa32Mem;
504 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
505 if (rcStrict == VINF_SUCCESS)
506 {
507 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
508 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
509 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
510 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
511 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
512 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
513 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
514 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
515 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 iemRegSubFromRsp(pIemCpu, pCtx, 32);
519 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
520 }
521 }
522 }
523 return rcStrict;
524}
525
526
527/**
528 * Implements pushf.
529 *
530 *
531 * @param enmEffOpSize The effective operand size.
532 */
533IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
534{
535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
536
537 /*
538 * If we're in V8086 mode some care is required (which is why we're
539 * doing this in a C implementation).
540 */
541 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
542 if ( (fEfl & X86_EFL_VM)
543 && X86_EFL_GET_IOPL(fEfl) != 3 )
544 {
545 Assert(pCtx->cr0 & X86_CR0_PE);
546 if ( enmEffOpSize != IEMMODE_16BIT
547 || !(pCtx->cr4 & X86_CR4_VME))
548 return iemRaiseGeneralProtectionFault0(pIemCpu);
549 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
550 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
551 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
552 }
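    /*
     * In the VME case above, X86_EFL_VIF lives in bit 19 and X86_EFL_IF in
     * bit 9, so the shift by (19 - 9) copies the virtual interrupt flag into
     * the IF position of the 16-bit image that gets pushed.
     */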
553
554 /*
555 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
556 */
557 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
558
559 VBOXSTRICTRC rcStrict;
560 switch (enmEffOpSize)
561 {
562 case IEMMODE_16BIT:
563 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
564 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_186)
565 fEfl |= UINT16_C(0xf000);
566 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
567 break;
568 case IEMMODE_32BIT:
569 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
570 break;
571 case IEMMODE_64BIT:
572 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
573 break;
574 IEM_NOT_REACHED_DEFAULT_CASE_RET();
575 }
576 if (rcStrict != VINF_SUCCESS)
577 return rcStrict;
578
579 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
580 return VINF_SUCCESS;
581}
582
583
584/**
585 * Implements popf.
586 *
587 * @param enmEffOpSize The effective operand size.
588 */
589IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
590{
591 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
592 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
593 VBOXSTRICTRC rcStrict;
594 uint32_t fEflNew;
595
596 /*
597 * V8086 is special as usual.
598 */
599 if (fEflOld & X86_EFL_VM)
600 {
601 /*
602 * Almost anything goes if IOPL is 3.
603 */
604 if (X86_EFL_GET_IOPL(fEflOld) == 3)
605 {
606 switch (enmEffOpSize)
607 {
608 case IEMMODE_16BIT:
609 {
610 uint16_t u16Value;
611 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
612 if (rcStrict != VINF_SUCCESS)
613 return rcStrict;
614 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
615 break;
616 }
617 case IEMMODE_32BIT:
618 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
619 if (rcStrict != VINF_SUCCESS)
620 return rcStrict;
621 break;
622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
623 }
624
625 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
626 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
627 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
628 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
629 }
630 /*
631 * Interrupt flag virtualization with CR4.VME=1.
632 */
633 else if ( enmEffOpSize == IEMMODE_16BIT
634 && (pCtx->cr4 & X86_CR4_VME) )
635 {
636 uint16_t u16Value;
637 RTUINT64U TmpRsp;
638 TmpRsp.u = pCtx->rsp;
639 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
640 if (rcStrict != VINF_SUCCESS)
641 return rcStrict;
642
643 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
644 * or before? */
645 if ( ( (u16Value & X86_EFL_IF)
646 && (fEflOld & X86_EFL_VIP))
647 || (u16Value & X86_EFL_TF) )
648 return iemRaiseGeneralProtectionFault0(pIemCpu);
649
650 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
651 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
652 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
653 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
654
655 pCtx->rsp = TmpRsp.u;
656 }
657 else
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 }
661 /*
662 * Not in V8086 mode.
663 */
664 else
665 {
666 /* Pop the flags. */
667 switch (enmEffOpSize)
668 {
669 case IEMMODE_16BIT:
670 {
671 uint16_t u16Value;
672 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
673 if (rcStrict != VINF_SUCCESS)
674 return rcStrict;
675 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
676
677 /*
678 * Ancient CPU adjustments:
679 * - 8086, 80186, V20/30:
680 * Fixed bits 15:12 are not kept correctly internally, mostly for
681 * practical reasons (masking below). We add them when pushing flags.
682 * - 80286:
683 * The NT and IOPL flags cannot be popped from real mode and are
684 * therefore always zero (since a 286 can never exit from PM and
685 * their initial value is zero). This changed on a 386 and can
686 * therefore be used to detect 286 or 386 CPU in real mode.
687 */
688 if ( IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_286
689 && !(pCtx->cr0 & X86_CR0_PE) )
690 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
691 break;
692 }
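            /*
             * This is what the classic real-mode CPU detection trick relies
             * on: guest code pushes the flags with IOPL forced to all ones,
             * pops them back and checks whether the IOPL bits stuck - on a
             * 286 they always read back as zero, on a 386 or later they are
             * preserved.
             */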
693 case IEMMODE_32BIT:
694 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
695 if (rcStrict != VINF_SUCCESS)
696 return rcStrict;
697 break;
698 case IEMMODE_64BIT:
699 {
700 uint64_t u64Value;
701 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
702 if (rcStrict != VINF_SUCCESS)
703 return rcStrict;
704 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
705 break;
706 }
707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
708 }
709
710 /* Merge them with the current flags. */
711 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
712 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
713 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
714 || pIemCpu->uCpl == 0)
715 {
716 fEflNew &= fPopfBits;
717 fEflNew |= ~fPopfBits & fEflOld;
718 }
719 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
720 {
721 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
722 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
723 }
724 else
725 {
726 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
727 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
728 }
729 }
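        /*
         * To summarise the three-way merge above: at CPL 0 (or when neither
         * IOPL nor IF changes) every bit in fPopfBits is taken from the
         * popped value; at 0 < CPL <= IOPL the IF bit may change but IOPL is
         * kept; and at CPL > IOPL both IOPL and IF silently keep their old
         * values.  E.g. a CPL-3 program under IOPL 0 popping a value with IF
         * clear does not disable interrupts and does not fault.
         */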
730
731 /*
732 * Commit the flags.
733 */
734 Assert(fEflNew & RT_BIT_32(1));
735 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
736 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
737
738 return VINF_SUCCESS;
739}
740
741
742/**
743 * Implements a 16-bit indirect call.
744 *
745 * @param uNewPC The new program counter (RIP) value (loaded from the
746 * operand).
748 */
749IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
750{
751 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
752 uint16_t uOldPC = pCtx->ip + cbInstr;
753 if (uNewPC > pCtx->cs.u32Limit)
754 return iemRaiseGeneralProtectionFault0(pIemCpu);
755
756 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
757 if (rcStrict != VINF_SUCCESS)
758 return rcStrict;
759
760 pCtx->rip = uNewPC;
761 pCtx->eflags.Bits.u1RF = 0;
762 return VINF_SUCCESS;
763}
764
765
766/**
767 * Implements a 16-bit relative call.
768 *
769 * @param offDisp The displacement offset.
770 */
771IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
772{
773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
774 uint16_t uOldPC = pCtx->ip + cbInstr;
775 uint16_t uNewPC = uOldPC + offDisp;
776 if (uNewPC > pCtx->cs.u32Limit)
777 return iemRaiseGeneralProtectionFault0(pIemCpu);
778
779 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
780 if (rcStrict != VINF_SUCCESS)
781 return rcStrict;
782
783 pCtx->rip = uNewPC;
784 pCtx->eflags.Bits.u1RF = 0;
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Implements a 32-bit indirect call.
791 *
792 * @param uNewPC The new program counter (RIP) value (loaded from the
793 * operand).
795 */
796IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 uint32_t uOldPC = pCtx->eip + cbInstr;
800 if (uNewPC > pCtx->cs.u32Limit)
801 return iemRaiseGeneralProtectionFault0(pIemCpu);
802
803 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
804 if (rcStrict != VINF_SUCCESS)
805 return rcStrict;
806
807#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
808 /*
809 * CSAM hook for recording interesting indirect calls.
810 */
811 if ( !pCtx->eflags.Bits.u1IF
812 && (pCtx->cr0 & X86_CR0_PG)
813 && !CSAMIsEnabled(IEMCPU_TO_VM(pIemCpu))
814 && pIemCpu->uCpl == 0)
815 {
816 EMSTATE enmState = EMGetState(IEMCPU_TO_VMCPU(pIemCpu));
817 if ( enmState == EMSTATE_IEM_THEN_REM
818 || enmState == EMSTATE_IEM
819 || enmState == EMSTATE_REM)
820 CSAMR3RecordCallAddress(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
821 }
822#endif
823
824 pCtx->rip = uNewPC;
825 pCtx->eflags.Bits.u1RF = 0;
826 return VINF_SUCCESS;
827}
828
829
830/**
831 * Implements a 32-bit relative call.
832 *
833 * @param offDisp The displacement offset.
834 */
835IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
836{
837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
838 uint32_t uOldPC = pCtx->eip + cbInstr;
839 uint32_t uNewPC = uOldPC + offDisp;
840 if (uNewPC > pCtx->cs.u32Limit)
841 return iemRaiseGeneralProtectionFault0(pIemCpu);
842
843 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
844 if (rcStrict != VINF_SUCCESS)
845 return rcStrict;
846
847 pCtx->rip = uNewPC;
848 pCtx->eflags.Bits.u1RF = 0;
849 return VINF_SUCCESS;
850}
851
852
853/**
854 * Implements a 64-bit indirect call.
855 *
856 * @param uNewPC The new program counter (RIP) value (loaded from the
857 * operand).
859 */
860IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
861{
862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
863 uint64_t uOldPC = pCtx->rip + cbInstr;
864 if (!IEM_IS_CANONICAL(uNewPC))
865 return iemRaiseGeneralProtectionFault0(pIemCpu);
866
867 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
868 if (rcStrict != VINF_SUCCESS)
869 return rcStrict;
870
871 pCtx->rip = uNewPC;
872 pCtx->eflags.Bits.u1RF = 0;
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Implements a 64-bit relative call.
879 *
880 * @param offDisp The displacement offset.
881 */
882IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
883{
884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
885 uint64_t uOldPC = pCtx->rip + cbInstr;
886 uint64_t uNewPC = uOldPC + offDisp;
887 if (!IEM_IS_CANONICAL(uNewPC))
888 return iemRaiseNotCanonical(pIemCpu);
889
890 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
891 if (rcStrict != VINF_SUCCESS)
892 return rcStrict;
893
894 pCtx->rip = uNewPC;
895 pCtx->eflags.Bits.u1RF = 0;
896 return VINF_SUCCESS;
897}
898
899
900/**
901 * Implements far jumps and calls thru task segments (TSS).
902 *
903 * @param uSel The selector.
904 * @param enmBranch The kind of branching we're performing.
905 * @param enmEffOpSize The effective operand size.
906 * @param pDesc The descriptor corresponding to @a uSel. The type is
907 * task segment (TSS).
908 */
909IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
910{
911#ifndef IEM_IMPLEMENTS_TASKSWITCH
912 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
913#else
914 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
915 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
916 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
917
918 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
919 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
920 {
921 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
922 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
923 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
924 }
925
926 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
927 * far calls (see iemCImpl_callf). Most likely in both cases it should be
928 * checked here, need testcases. */
929 if (!pDesc->Legacy.Gen.u1Present)
930 {
931 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
932 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
933 }
934
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t uNextEip = pCtx->eip + cbInstr;
937 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
938 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
939#endif
940}
941
942
943/**
944 * Implements far jumps and calls thru task gates.
945 *
946 * @param uSel The selector.
947 * @param enmBranch The kind of branching we're performing.
948 * @param enmEffOpSize The effective operand size.
949 * @param pDesc The descriptor corresponding to @a uSel. The type is
950 * task gate.
951 */
952IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
953{
954#ifndef IEM_IMPLEMENTS_TASKSWITCH
955 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
956#else
957 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
958
959 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
960 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
961 {
962 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
963 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
965 }
966
967 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
968 * far calls (see iemCImpl_callf). Most likely in both cases it should be
969 * checked here, need testcases. */
970 if (!pDesc->Legacy.Gen.u1Present)
971 {
972 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
973 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
974 }
975
976 /*
977 * Fetch the new TSS descriptor from the GDT.
978 */
979 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
980 if (uSelTss & X86_SEL_LDT)
981 {
982 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
983 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
984 }
985
986 IEMSELDESC TssDesc;
987 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelTss, X86_XCPT_GP);
988 if (rcStrict != VINF_SUCCESS)
989 return rcStrict;
990
991 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
992 {
993 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
994 TssDesc.Legacy.Gate.u4Type));
995 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
996 }
997
998 if (!TssDesc.Legacy.Gate.u1Present)
999 {
1000 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1001 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1002 }
1003
1004 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1005 uint32_t uNextEip = pCtx->eip + cbInstr;
1006 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1007 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1008#endif
1009}
1010
1011
1012/**
1013 * Implements far jumps and calls thru call gates.
1014 *
1015 * @param uSel The selector.
1016 * @param enmBranch The kind of branching we're performing.
1017 * @param enmEffOpSize The effective operand size.
1018 * @param pDesc The descriptor corresponding to @a uSel. The type is
1019 * call gate.
1020 */
1021IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1022{
1023#ifndef IEM_IMPLEMENTS_CALLGATE
1024 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1025#else
1026 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1027 * inter-privilege calls and are much more complex.
1028 *
1029 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1030 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1031 * must be 16-bit or 32-bit.
1032 */
1033 /** @todo: effective operand size is probably irrelevant here, only the
1034 * call gate bitness matters??
1035 */
1036 VBOXSTRICTRC rcStrict;
1037 RTPTRUNION uPtrRet;
1038 uint64_t uNewRsp;
1039 uint64_t uNewRip;
1040 uint64_t u64Base;
1041 uint32_t cbLimit;
1042 RTSEL uNewCS;
1043 IEMSELDESC DescCS;
1044 PCPUMCTX pCtx;
1045
1046 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1047 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1048 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1049 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1050
1051 /* Determine the new instruction pointer from the gate descriptor. */
1052 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1053 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1054 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1055
1056 /* Perform DPL checks on the gate descriptor. */
1057 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
1058 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1059 {
1060 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1061 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
1062 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1063 }
1064
1065 /** @todo does this catch NULL selectors, too? */
1066 if (!pDesc->Legacy.Gen.u1Present)
1067 {
1068 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1069 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1070 }
1071
1072 /*
1073 * Fetch the target CS descriptor from the GDT or LDT.
1074 */
1075 uNewCS = pDesc->Legacy.Gate.u16Sel;
1076 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_GP);
1077 if (rcStrict != VINF_SUCCESS)
1078 return rcStrict;
1079
1080 /* Target CS must be a code selector. */
1081 if ( !DescCS.Legacy.Gen.u1DescType
1082 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1083 {
1084 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1085 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1086 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1087 }
1088
1089 /* Privilege checks on target CS. */
1090 if (enmBranch == IEMBRANCH_JUMP)
1091 {
1092 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1093 {
1094 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1095 {
1096 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1097 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1098 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1099 }
1100 }
1101 else
1102 {
1103 if (DescCS.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1104 {
1105 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1106 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1107 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1108 }
1109 }
1110 }
1111 else
1112 {
1113 Assert(enmBranch == IEMBRANCH_CALL);
1114 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1115 {
1116 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1117 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1119 }
1120 }
1121
1122 /* Additional long mode checks. */
1123 if (IEM_IS_LONG_MODE(pIemCpu))
1124 {
1125 if (!DescCS.Legacy.Gen.u1Long)
1126 {
1127 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1129 }
1130
1131 /* L vs D. */
1132 if ( DescCS.Legacy.Gen.u1Long
1133 && DescCS.Legacy.Gen.u1DefBig)
1134 {
1135 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1137 }
1138 }
1139
1140 if (!DescCS.Legacy.Gate.u1Present)
1141 {
1142 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1143 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
1144 }
1145
1146 pCtx = pIemCpu->CTX_SUFF(pCtx);
1147
1148 if (enmBranch == IEMBRANCH_JUMP)
1149 {
1150 /** @todo: This is very similar to regular far jumps; merge! */
1151 /* Jumps are fairly simple... */
1152
1153 /* Chop the high bits off if 16-bit gate (Intel says so). */
1154 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1155 uNewRip = (uint16_t)uNewRip;
1156
1157 /* Limit check for non-long segments. */
1158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1159 if (DescCS.Legacy.Gen.u1Long)
1160 u64Base = 0;
1161 else
1162 {
1163 if (uNewRip > cbLimit)
1164 {
1165 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1166 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1167 }
1168 u64Base = X86DESC_BASE(&DescCS.Legacy);
1169 }
1170
1171 /* Canonical address check. */
1172 if (!IEM_IS_CANONICAL(uNewRip))
1173 {
1174 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1175 return iemRaiseNotCanonical(pIemCpu);
1176 }
1177
1178 /*
1179 * Ok, everything checked out fine. Now set the accessed bit before
1180 * committing the result into CS, CSHID and RIP.
1181 */
1182 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1183 {
1184 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1185 if (rcStrict != VINF_SUCCESS)
1186 return rcStrict;
1187 /** @todo check what VT-x and AMD-V does. */
1188 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1189 }
1190
1191 /* commit */
1192 pCtx->rip = uNewRip;
1193 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1194 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1195 pCtx->cs.ValidSel = pCtx->cs.Sel;
1196 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1197 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1198 pCtx->cs.u32Limit = cbLimit;
1199 pCtx->cs.u64Base = u64Base;
1200 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1201 }
1202 else
1203 {
1204 Assert(enmBranch == IEMBRANCH_CALL);
1205 /* Calls are much more complicated. */
1206
1207 if (DescCS.Legacy.Gen.u2Dpl < pIemCpu->uCpl)
1208 {
1209 uint16_t offNewStack; /* Offset of new stack in TSS. */
1210 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1211 uint8_t uNewCSDpl;
1212 uint8_t cbWords;
1213 RTSEL uNewSS;
1214 RTSEL uOldSS;
1215 uint64_t uOldRsp;
1216 IEMSELDESC DescSS;
1217 RTPTRUNION uPtrTSS;
1218 RTGCPTR GCPtrTSS;
1219 RTPTRUNION uPtrParmWds;
1220 RTGCPTR GCPtrParmWds;
1221
1222 /* More privilege. This is the fun part. */
1223 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1224
1225 /*
1226 * Determine new SS:rSP from the TSS.
1227 */
1228 Assert(!pCtx->tr.Attr.n.u1DescType);
1229
1230 /* Figure out where the new stack pointer is stored in the TSS. */
1231 uNewCSDpl = uNewCS & X86_SEL_RPL;
1232 if (!IEM_IS_LONG_MODE(pIemCpu))
1233 {
1234 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1235 {
1236 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1237 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1238 }
1239 else
1240 {
1241 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1242 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1243 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1244 }
1245 }
1246 else
1247 {
1248 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1249 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1250 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1251 }
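            /*
             * For example, with a 32-bit TSS and a target DPL of 1, esp1 sits
             * at TSS offset RT_OFFSETOF(X86TSS32, esp0) + 1 * 8 = 12 with ss1
             * right after it, so 6 bytes are read; a 16-bit TSS uses a 4-byte
             * stride from sp0, and a 64-bit TSS simply reads the 8-byte rspN
             * field (the new SS being a null selector with RPL equal to the
             * new DPL).
             */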
1252
1253 /* Check against TSS limit. */
1254 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1255 {
1256 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1257 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, pCtx->tr.Sel);
1258 }
1259
1260 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1261 rcStrict = iemMemMap(pIemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1262 if (rcStrict != VINF_SUCCESS)
1263 {
1264 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1265 return rcStrict;
1266 }
1267
1268 if (!IEM_IS_LONG_MODE(pIemCpu))
1269 {
1270 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1271 {
1272 uNewRsp = uPtrTSS.pu32[0];
1273 uNewSS = uPtrTSS.pu16[2];
1274 }
1275 else
1276 {
1277 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1278 uNewRsp = uPtrTSS.pu16[0];
1279 uNewSS = uPtrTSS.pu16[1];
1280 }
1281 }
1282 else
1283 {
1284 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1285 /* SS will be a NULL selector, but that's valid. */
1286 uNewRsp = uPtrTSS.pu64[0];
1287 uNewSS = uNewCSDpl;
1288 }
1289
1290 /* Done with the TSS now. */
1291 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1292 if (rcStrict != VINF_SUCCESS)
1293 {
1294 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1295 return rcStrict;
1296 }
1297
1298 /* Only used outside of long mode. */
1299 cbWords = pDesc->Legacy.Gate.u4ParmCount;
1300
1301 /* If EFER.LMA is 0, there's extra work to do. */
1302 if (!IEM_IS_LONG_MODE(pIemCpu))
1303 {
1304 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1305 {
1306 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1307 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1308 }
1309
1310 /* Grab the new SS descriptor. */
1311 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1312 if (rcStrict != VINF_SUCCESS)
1313 return rcStrict;
1314
1315 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1316 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1317 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1318 {
1319 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1320 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1321 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1322 }
1323
1324 /* Ensure new SS is a writable data segment. */
1325 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1326 {
1327 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1328 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1329 }
1330
1331 if (!DescSS.Legacy.Gen.u1Present)
1332 {
1333 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1334 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
1335 }
1336 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1337 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1338 else
1339 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1340 }
1341 else
1342 {
1343 /* Just grab the new (NULL) SS descriptor. */
1344 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1345 if (rcStrict != VINF_SUCCESS)
1346 return rcStrict;
1347
1348 cbNewStack = sizeof(uint64_t) * 4;
1349 }
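            /*
             * The resulting frame sizes: a 386 call gate with, say, 3
             * parameter dwords needs 4 * (4 + 3) = 28 bytes on the inner
             * stack (EIP, CS, the copied dwords, ESP and SS); a 286 gate
             * needs 2 * (4 + cbWords) bytes; a long-mode gate copies no
             * parameters and always pushes four 8-byte values
             * (RIP, CS, RSP, SS) = 32 bytes.
             */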
1350
1351 /** @todo: According to Intel, new stack is checked for enough space first,
1352 * then switched. According to AMD, the stack is switched first and
1353 * then pushes might fault!
1354 */
1355
1356 /** @todo: According to AMD, CS is loaded first, then SS.
1357 * According to Intel, it's the other way around!?
1358 */
1359
1360 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1361
1362 /* Set the accessed bit before committing new SS. */
1363 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1364 {
1365 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
1366 if (rcStrict != VINF_SUCCESS)
1367 return rcStrict;
1368 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1369 }
1370
1371 /* Remember the old SS:rSP and their linear address. */
1372 uOldSS = pCtx->ss.Sel;
1373 uOldRsp = pCtx->rsp;
1374
1375 GCPtrParmWds = pCtx->ss.u64Base + pCtx->rsp;
1376
1377 /* Commit new SS:rSP. */
1378 pCtx->ss.Sel = uNewSS;
1379 pCtx->ss.ValidSel = uNewSS;
1380 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1381 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1382 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1383 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1384 pCtx->rsp = uNewRsp;
1385 pIemCpu->uCpl = uNewCSDpl;
1386 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
1387 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
1388
1389 /* Check new stack - may #SS(NewSS). */
1390 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbNewStack,
1391 &uPtrRet.pv, &uNewRsp);
1392 if (rcStrict != VINF_SUCCESS)
1393 {
1394 Log(("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1395 return rcStrict;
1396 }
1397
1398 if (!IEM_IS_LONG_MODE(pIemCpu))
1399 {
1400 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1401 {
1402 /* Push the old CS:rIP. */
1403 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1404 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1405
1406 /* Map the relevant chunk of the old stack. */
1407 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1408 if (rcStrict != VINF_SUCCESS)
1409 {
1410 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1411 return rcStrict;
1412 }
1413
1414 /* Copy the parameter (d)words. */
1415 for (int i = 0; i < cbWords; ++i)
1416 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1417
1418 /* Unmap the old stack. */
1419 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1420 if (rcStrict != VINF_SUCCESS)
1421 {
1422 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1423 return rcStrict;
1424 }
1425
1426 /* Push the old SS:rSP. */
1427 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1428 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1429 }
1430 else
1431 {
1432 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1433
1434 /* Push the old CS:rIP. */
1435 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1436 uPtrRet.pu16[1] = pCtx->cs.Sel;
1437
1438 /* Map the relevant chunk of the old stack. */
1439 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1440 if (rcStrict != VINF_SUCCESS)
1441 {
1442 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1443 return rcStrict;
1444 }
1445
1446 /* Copy the parameter words. */
1447 for (int i = 0; i < cbWords; ++i)
1448 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1449
1450 /* Unmap the old stack. */
1451 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1452 if (rcStrict != VINF_SUCCESS)
1453 {
1454 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1455 return rcStrict;
1456 }
1457
1458 /* Push the old SS:rSP. */
1459 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1460 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1461 }
1462 }
1463 else
1464 {
1465 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1466
1467 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1468 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1469 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1470 uPtrRet.pu64[2] = uOldRsp;
1471 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1472 }
1473
1474 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Chop the high bits off if 16-bit gate (Intel says so). */
1482 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1483 uNewRip = (uint16_t)uNewRip;
1484
1485 /* Limit / canonical check. */
1486 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1487 if (!IEM_IS_LONG_MODE(pIemCpu))
1488 {
1489 if (uNewRip > cbLimit)
1490 {
1491 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1492 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1493 }
1494 u64Base = X86DESC_BASE(&DescCS.Legacy);
1495 }
1496 else
1497 {
1498 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1499 if (!IEM_IS_CANONICAL(uNewRip))
1500 {
1501 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1502 return iemRaiseNotCanonical(pIemCpu);
1503 }
1504 u64Base = 0;
1505 }
1506
1507 /*
1508 * The return frame has already been pushed and committed above, so now
1509 * set the accessed bit before committing the result into
1510 * CS, CSHID and RIP.
1511 */
1512 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1513 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1514 {
1515 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1516 if (rcStrict != VINF_SUCCESS)
1517 return rcStrict;
1518 /** @todo check what VT-x and AMD-V does. */
1519 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1520 }
1521
1522 /* Commit new CS:rIP. */
1523 pCtx->rip = uNewRip;
1524 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1525 pCtx->cs.Sel |= pIemCpu->uCpl;
1526 pCtx->cs.ValidSel = pCtx->cs.Sel;
1527 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1528 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1529 pCtx->cs.u32Limit = cbLimit;
1530 pCtx->cs.u64Base = u64Base;
1531 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1532 }
1533 else
1534 {
1535 /* Same privilege. */
1536 /** @todo: This is very similar to regular far calls; merge! */
1537
1538 /* Check stack first - may #SS(0). */
1539 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1540 * 16-bit code cause a two or four byte CS to be pushed? */
1541 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1542 IEM_IS_LONG_MODE(pIemCpu) ? 8+8
1543 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1544 &uPtrRet.pv, &uNewRsp);
1545 if (rcStrict != VINF_SUCCESS)
1546 return rcStrict;
1547
1548 /* Chop the high bits off if 16-bit gate (Intel says so). */
1549 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1550 uNewRip = (uint16_t)uNewRip;
1551
1552 /* Limit / canonical check. */
1553 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1554 if (!IEM_IS_LONG_MODE(pIemCpu))
1555 {
1556 if (uNewRip > cbLimit)
1557 {
1558 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1559 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1560 }
1561 u64Base = X86DESC_BASE(&DescCS.Legacy);
1562 }
1563 else
1564 {
1565 if (!IEM_IS_CANONICAL(uNewRip))
1566 {
1567 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1568 return iemRaiseNotCanonical(pIemCpu);
1569 }
1570 u64Base = 0;
1571 }
1572
1573 /*
1574 * Now set the accessed bit before
1575 * writing the return address to the stack and committing the result into
1576 * CS, CSHID and RIP.
1577 */
1578 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1579 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1580 {
1581 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1582 if (rcStrict != VINF_SUCCESS)
1583 return rcStrict;
1584 /** @todo check what VT-x and AMD-V does. */
1585 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1586 }
1587
1588 /* stack */
1589 if (!IEM_IS_LONG_MODE(pIemCpu))
1590 {
1591 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1592 {
1593 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1594 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1595 }
1596 else
1597 {
1598 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1599 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1600 uPtrRet.pu16[1] = pCtx->cs.Sel;
1601 }
1602 }
1603 else
1604 {
1605 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1606 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1607 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1608 }
1609
1610 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1611 if (rcStrict != VINF_SUCCESS)
1612 return rcStrict;
1613
1614 /* commit */
1615 pCtx->rip = uNewRip;
1616 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1617 pCtx->cs.Sel |= pIemCpu->uCpl;
1618 pCtx->cs.ValidSel = pCtx->cs.Sel;
1619 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1620 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1621 pCtx->cs.u32Limit = cbLimit;
1622 pCtx->cs.u64Base = u64Base;
1623 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1624 }
1625 }
1626 pCtx->eflags.Bits.u1RF = 0;
1627 return VINF_SUCCESS;
1628#endif
1629}
1630
1631
1632/**
1633 * Implements far jumps and calls thru system selectors.
1634 *
1635 * @param uSel The selector.
1636 * @param enmBranch The kind of branching we're performing.
1637 * @param enmEffOpSize The effective operand size.
1638 * @param pDesc The descriptor corresponding to @a uSel.
1639 */
1640IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1641{
1642 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1643 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1644
1645 if (IEM_IS_LONG_MODE(pIemCpu))
1646 switch (pDesc->Legacy.Gen.u4Type)
1647 {
1648 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1649 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1650
1651 default:
1652 case AMD64_SEL_TYPE_SYS_LDT:
1653 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1654 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1656 case AMD64_SEL_TYPE_SYS_INT_GATE:
1657 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1658 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1659 }
1660
1661 switch (pDesc->Legacy.Gen.u4Type)
1662 {
1663 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1664 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1665 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1666
1667 case X86_SEL_TYPE_SYS_TASK_GATE:
1668 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1669
1670 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1671 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1672 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1673
1674 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1675 Log(("branch %04x -> busy 286 TSS\n", uSel));
1676 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1677
1678 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1679 Log(("branch %04x -> busy 386 TSS\n", uSel));
1680 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1681
1682 default:
1683 case X86_SEL_TYPE_SYS_LDT:
1684 case X86_SEL_TYPE_SYS_286_INT_GATE:
1685 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1686 case X86_SEL_TYPE_SYS_386_INT_GATE:
1687 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1688 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1689 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1690 }
1691}
1692
1693
1694/**
1695 * Implements far jumps.
1696 *
1697 * @param uSel The selector.
1698 * @param offSeg The segment offset.
1699 * @param enmEffOpSize The effective operand size.
1700 */
1701IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1702{
1703 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1704 NOREF(cbInstr);
1705 Assert(offSeg <= UINT32_MAX);
1706
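 /*
  * Informal overview: real and V8086 mode only check offSeg against the
  * current CS limit and reload CS the real-mode way; protected mode fetches
  * and validates the descriptor, dispatching system descriptors to
  * iemCImpl_BranchSysSel and handling plain code segments inline below.
  */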
1707 /*
1708 * Real mode and V8086 mode are easy. The only snag seems to be that
1709 * CS.limit doesn't change and the limit check is done against the current
1710 * limit.
1711 */
1712 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1713 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1714 {
1715 if (offSeg > pCtx->cs.u32Limit)
1716 {
1717 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1718 return iemRaiseGeneralProtectionFault0(pIemCpu);
1719 }
1720
1721 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1722 pCtx->rip = offSeg;
1723 else
1724 pCtx->rip = offSeg & UINT16_MAX;
1725 pCtx->cs.Sel = uSel;
1726 pCtx->cs.ValidSel = uSel;
1727 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1728 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1729 pCtx->eflags.Bits.u1RF = 0;
1730 return VINF_SUCCESS;
1731 }
1732
1733 /*
1734 * Protected mode. Need to parse the specified descriptor...
1735 */
1736 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1737 {
1738 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1739 return iemRaiseGeneralProtectionFault0(pIemCpu);
1740 }
1741
1742 /* Fetch the descriptor. */
1743 IEMSELDESC Desc;
1744 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1745 if (rcStrict != VINF_SUCCESS)
1746 return rcStrict;
1747
1748 /* Is it there? */
1749 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1750 {
1751 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1752 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1753 }
1754
1755 /*
1756 * Deal with it according to its type. We do the standard code selectors
1757 * here and dispatch the system selectors to worker functions.
1758 */
1759 if (!Desc.Legacy.Gen.u1DescType)
1760 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1761
1762 /* Only code segments. */
1763 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1764 {
1765 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1766 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1767 }
1768
1769 /* L vs D. */
1770 if ( Desc.Legacy.Gen.u1Long
1771 && Desc.Legacy.Gen.u1DefBig
1772 && IEM_IS_LONG_MODE(pIemCpu))
1773 {
1774 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1775 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1776 }
1777
1778 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1779 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1780 {
1781 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1782 {
1783 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1784 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1785 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1786 }
1787 }
1788 else
1789 {
1790 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1791 {
1792 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1793 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1794 }
1795 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1796 {
1797 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1798 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1799 }
1800 }
1801
1802 /* Chop the high bits if 16-bit (Intel says so). */
1803 if (enmEffOpSize == IEMMODE_16BIT)
1804 offSeg &= UINT16_MAX;
1805
1806 /* Limit check. (Should alternatively check for non-canonical addresses
1807 here, but that is ruled out by offSeg being 32-bit, right?) */
1808 uint64_t u64Base;
1809 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1810 if (Desc.Legacy.Gen.u1Long)
1811 u64Base = 0;
1812 else
1813 {
1814 if (offSeg > cbLimit)
1815 {
1816 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1817 /** @todo: Intel says this is #GP(0)! */
1818 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1819 }
1820 u64Base = X86DESC_BASE(&Desc.Legacy);
1821 }
1822
1823 /*
1824 * Ok, everything checked out fine. Now set the accessed bit before
1825 * committing the result into CS, CSHID and RIP.
1826 */
1827 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1828 {
1829 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1830 if (rcStrict != VINF_SUCCESS)
1831 return rcStrict;
1832 /** @todo check what VT-x and AMD-V does. */
1833 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1834 }
1835
1836 /* commit */
1837 pCtx->rip = offSeg;
1838 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1839 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1840 pCtx->cs.ValidSel = pCtx->cs.Sel;
1841 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1842 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1843 pCtx->cs.u32Limit = cbLimit;
1844 pCtx->cs.u64Base = u64Base;
1845 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1846 pCtx->eflags.Bits.u1RF = 0;
1847 /** @todo check if the hidden bits are loaded correctly for 64-bit
1848 * mode. */
1849 return VINF_SUCCESS;
1850}
1851
1852
1853/**
1854 * Implements far calls.
1855 *
1856 * This is very similar to iemCImpl_FarJmp.
1857 *
1858 * @param uSel The selector.
1859 * @param offSeg The segment offset.
1860 * @param enmEffOpSize The operand size (in case we need it).
1861 */
1862IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1863{
1864 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1865 VBOXSTRICTRC rcStrict;
1866 uint64_t uNewRsp;
1867 RTPTRUNION uPtrRet;
1868
1869 /*
1870 * Real mode and V8086 mode are easy. The only snag seems to be that
1871 * CS.limit doesn't change and the limit check is done against the current
1872 * limit.
1873 */
1874 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1875 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1876 {
1877 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1878
1879 /* Check stack first - may #SS(0). */
1880 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1881 &uPtrRet.pv, &uNewRsp);
1882 if (rcStrict != VINF_SUCCESS)
1883 return rcStrict;
1884
1885 /* Check the target address range. */
1886 if (offSeg > UINT32_MAX)
1887 return iemRaiseGeneralProtectionFault0(pIemCpu);
1888
1889 /* Everything is fine, push the return address. */
1890 if (enmEffOpSize == IEMMODE_16BIT)
1891 {
1892 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1893 uPtrRet.pu16[1] = pCtx->cs.Sel;
1894 }
1895 else
1896 {
1897 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1898 uPtrRet.pu16[3] = pCtx->cs.Sel;
1899 }
1900 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1901 if (rcStrict != VINF_SUCCESS)
1902 return rcStrict;
1903
1904 /* Branch. */
1905 pCtx->rip = offSeg;
1906 pCtx->cs.Sel = uSel;
1907 pCtx->cs.ValidSel = uSel;
1908 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1909 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1910 pCtx->eflags.Bits.u1RF = 0;
1911 return VINF_SUCCESS;
1912 }
1913
1914 /*
1915 * Protected mode. Need to parse the specified descriptor...
1916 */
1917 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1918 {
1919 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1920 return iemRaiseGeneralProtectionFault0(pIemCpu);
1921 }
1922
1923 /* Fetch the descriptor. */
1924 IEMSELDESC Desc;
1925 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1926 if (rcStrict != VINF_SUCCESS)
1927 return rcStrict;
1928
1929 /*
1930 * Deal with it according to its type. We do the standard code selectors
1931 * here and dispatch the system selectors to worker functions.
1932 */
1933 if (!Desc.Legacy.Gen.u1DescType)
1934 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1935
1936 /* Only code segments. */
1937 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1938 {
1939 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1940 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1941 }
1942
1943 /* L vs D. */
1944 if ( Desc.Legacy.Gen.u1Long
1945 && Desc.Legacy.Gen.u1DefBig
1946 && IEM_IS_LONG_MODE(pIemCpu))
1947 {
1948 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1949 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1950 }
1951
1952 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1953 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1954 {
1955 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1956 {
1957 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1958 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1959 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1960 }
1961 }
1962 else
1963 {
1964 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1965 {
1966 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1967 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1968 }
1969 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1970 {
1971 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1972 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1973 }
1974 }
1975
1976 /* Is it there? */
1977 if (!Desc.Legacy.Gen.u1Present)
1978 {
1979 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1980 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1981 }
1982
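 /*
  * Informal sketch of the return frame written further down: the far return
  * address is pushed as IP:CS, sized by the effective operand size -- 2+2
  * bytes for 16-bit, 4+4 for 32-bit and 8+8 for 64-bit operands.  What lands
  * in the high word(s) of the CS slot is still an open @todo testcase below.
  */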
1983 /* Check stack first - may #SS(0). */
1984 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1985 * 16-bit code cause a two or four byte CS to be pushed? */
1986 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1987 enmEffOpSize == IEMMODE_64BIT ? 8+8
1988 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1989 &uPtrRet.pv, &uNewRsp);
1990 if (rcStrict != VINF_SUCCESS)
1991 return rcStrict;
1992
1993 /* Chop the high bits if 16-bit (Intel says so). */
1994 if (enmEffOpSize == IEMMODE_16BIT)
1995 offSeg &= UINT16_MAX;
1996
1997 /* Limit / canonical check. */
1998 uint64_t u64Base;
1999 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2000 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2001 {
2002 if (!IEM_IS_CANONICAL(offSeg))
2003 {
2004 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2005 return iemRaiseNotCanonical(pIemCpu);
2006 }
2007 u64Base = 0;
2008 }
2009 else
2010 {
2011 if (offSeg > cbLimit)
2012 {
2013 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2014 /** @todo: Intel says this is #GP(0)! */
2015 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2016 }
2017 u64Base = X86DESC_BASE(&Desc.Legacy);
2018 }
2019
2020 /*
2021 * Now set the accessed bit before
2022 * writing the return address to the stack and committing the result into
2023 * CS, CSHID and RIP.
2024 */
2025 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2026 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2027 {
2028 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2029 if (rcStrict != VINF_SUCCESS)
2030 return rcStrict;
2031 /** @todo check what VT-x and AMD-V does. */
2032 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2033 }
2034
2035 /* stack */
2036 if (enmEffOpSize == IEMMODE_16BIT)
2037 {
2038 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2039 uPtrRet.pu16[1] = pCtx->cs.Sel;
2040 }
2041 else if (enmEffOpSize == IEMMODE_32BIT)
2042 {
2043 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2044 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2045 }
2046 else
2047 {
2048 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2049 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2050 }
2051 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
2052 if (rcStrict != VINF_SUCCESS)
2053 return rcStrict;
2054
2055 /* commit */
2056 pCtx->rip = offSeg;
2057 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2058 pCtx->cs.Sel |= pIemCpu->uCpl;
2059 pCtx->cs.ValidSel = pCtx->cs.Sel;
2060 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2061 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2062 pCtx->cs.u32Limit = cbLimit;
2063 pCtx->cs.u64Base = u64Base;
2064 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2065 pCtx->eflags.Bits.u1RF = 0;
2066 /** @todo check if the hidden bits are loaded correctly for 64-bit
2067 * mode. */
2068 return VINF_SUCCESS;
2069}
2070
2071
2072/**
2073 * Implements retf.
2074 *
2075 * @param enmEffOpSize The effective operand size.
2076 * @param cbPop The number of bytes of arguments to pop from the
2077 * stack.
2078 */
2079IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2080{
2081 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2082 VBOXSTRICTRC rcStrict;
2083 RTCPTRUNION uPtrFrame;
2084 uint64_t uNewRsp;
2085 uint64_t uNewRip;
2086 uint16_t uNewCs;
2087 NOREF(cbInstr);
2088
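 /*
  * Informal sketch of the stack as retf sees it (grounded in the code below):
  *   [new IP/EIP/RIP][new CS][cbPop bytes of parameters][outer (E/R)SP][outer SS]
  * The outer SS:SP pair is only read and loaded when returning to an outer
  * privilege level (CS.RPL != CPL).
  */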
2089 /*
2090 * Read the stack values first.
2091 */
2092 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2093 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2094 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2095 if (rcStrict != VINF_SUCCESS)
2096 return rcStrict;
2097 if (enmEffOpSize == IEMMODE_16BIT)
2098 {
2099 uNewRip = uPtrFrame.pu16[0];
2100 uNewCs = uPtrFrame.pu16[1];
2101 }
2102 else if (enmEffOpSize == IEMMODE_32BIT)
2103 {
2104 uNewRip = uPtrFrame.pu32[0];
2105 uNewCs = uPtrFrame.pu16[2];
2106 }
2107 else
2108 {
2109 uNewRip = uPtrFrame.pu64[0];
2110 uNewCs = uPtrFrame.pu16[4];
2111 }
2112
2113 /*
2114 * Real mode and V8086 mode are easy.
2115 */
2116 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2117 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2118 {
2119 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2120 /** @todo check how this is supposed to work if sp=0xfffe. */
2121
2122 /* Check the limit of the new EIP. */
2123 /** @todo Intel pseudo code only does the limit check for 16-bit
2124 * operands; AMD does not make any distinction. What is right? */
2125 if (uNewRip > pCtx->cs.u32Limit)
2126 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2127
2128 /* commit the operation. */
2129 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2130 if (rcStrict != VINF_SUCCESS)
2131 return rcStrict;
2132 pCtx->rip = uNewRip;
2133 pCtx->cs.Sel = uNewCs;
2134 pCtx->cs.ValidSel = uNewCs;
2135 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2136 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2137 pCtx->eflags.Bits.u1RF = 0;
2138 /** @todo do we load attribs and limit as well? */
2139 if (cbPop)
2140 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2141 return VINF_SUCCESS;
2142 }
2143
2144 /*
2145 * Protected mode is complicated, of course.
2146 */
2147 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2148 {
2149 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2150 return iemRaiseGeneralProtectionFault0(pIemCpu);
2151 }
2152
2153 /* Fetch the descriptor. */
2154 IEMSELDESC DescCs;
2155 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs, X86_XCPT_GP);
2156 if (rcStrict != VINF_SUCCESS)
2157 return rcStrict;
2158
2159 /* Can only return to a code selector. */
2160 if ( !DescCs.Legacy.Gen.u1DescType
2161 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2162 {
2163 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2164 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2165 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2166 }
2167
2168 /* L vs D. */
2169 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2170 && DescCs.Legacy.Gen.u1DefBig
2171 && IEM_IS_LONG_MODE(pIemCpu))
2172 {
2173 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2174 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2175 }
2176
2177 /* DPL/RPL/CPL checks. */
2178 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2179 {
2180 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
2181 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2182 }
2183
2184 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2185 {
2186 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2187 {
2188 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2189 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2190 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2191 }
2192 }
2193 else
2194 {
2195 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2196 {
2197 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2198 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2199 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2200 }
2201 }
2202
2203 /* Is it there? */
2204 if (!DescCs.Legacy.Gen.u1Present)
2205 {
2206 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2207 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2208 }
2209
2210 /*
2211 * Return to outer privilege? (We'll typically have entered via a call gate.)
2212 */
2213 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2214 {
2215 /* Read the outer stack pointer stored *after* the parameters. */
2216 RTCPTRUNION uPtrStack;
2217 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
2218 if (rcStrict != VINF_SUCCESS)
2219 return rcStrict;
2220
2221 uPtrStack.pu8 += cbPop; /* Skip the parameters. */
2222
2223 uint16_t uNewOuterSs;
2224 uint64_t uNewOuterRsp;
2225 if (enmEffOpSize == IEMMODE_16BIT)
2226 {
2227 uNewOuterRsp = uPtrStack.pu16[0];
2228 uNewOuterSs = uPtrStack.pu16[1];
2229 }
2230 else if (enmEffOpSize == IEMMODE_32BIT)
2231 {
2232 uNewOuterRsp = uPtrStack.pu32[0];
2233 uNewOuterSs = uPtrStack.pu16[2];
2234 }
2235 else
2236 {
2237 uNewOuterRsp = uPtrStack.pu64[0];
2238 uNewOuterSs = uPtrStack.pu16[4];
2239 }
2240
2241 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2242 and read its descriptor. */
2243 IEMSELDESC DescSs;
2244 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2245 {
2246 if ( !DescCs.Legacy.Gen.u1Long
2247 || (uNewOuterSs & X86_SEL_RPL) == 3)
2248 {
2249 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2250 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2251 return iemRaiseGeneralProtectionFault0(pIemCpu);
2252 }
2253 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2254 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2255 }
2256 else
2257 {
2258 /* Fetch the descriptor for the new stack segment. */
2259 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2260 if (rcStrict != VINF_SUCCESS)
2261 return rcStrict;
2262 }
2263
2264 /* Check that RPL of stack and code selectors match. */
2265 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2266 {
2267 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2268 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2269 }
2270
2271 /* Must be a writable data segment. */
2272 if ( !DescSs.Legacy.Gen.u1DescType
2273 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2274 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2275 {
2276 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2277 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2278 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2279 }
2280
2281 /* L vs D. (Not mentioned by intel.) */
2282 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2283 && DescSs.Legacy.Gen.u1DefBig
2284 && IEM_IS_LONG_MODE(pIemCpu))
2285 {
2286 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2287 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2288 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2289 }
2290
2291 /* DPL/RPL/CPL checks. */
2292 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2293 {
2294 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2295 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2296 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2297 }
2298
2299 /* Is it there? */
2300 if (!DescSs.Legacy.Gen.u1Present)
2301 {
2302 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2303 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2304 }
2305
2306 /* Calc SS limit. */
2307 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2308
2309 /* Is RIP canonical or within CS.limit? */
2310 uint64_t u64Base;
2311 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2312
2313 /** @todo Testcase: Is this correct? */
2314 if ( DescCs.Legacy.Gen.u1Long
2315 && IEM_IS_LONG_MODE(pIemCpu) )
2316 {
2317 if (!IEM_IS_CANONICAL(uNewRip))
2318 {
2319 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2320 return iemRaiseNotCanonical(pIemCpu);
2321 }
2322 u64Base = 0;
2323 }
2324 else
2325 {
2326 if (uNewRip > cbLimitCs)
2327 {
2328 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2329 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2330 /** @todo: Intel says this is #GP(0)! */
2331 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2332 }
2333 u64Base = X86DESC_BASE(&DescCs.Legacy);
2334 }
2335
2336 /*
2337 * Now set the accessed bits before
2338 * committing the stack pop and the result into
2339 * CS, CSHID and RIP.
2340 */
2341 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2342 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2343 {
2344 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2345 if (rcStrict != VINF_SUCCESS)
2346 return rcStrict;
2347 /** @todo check what VT-x and AMD-V does. */
2348 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2349 }
2350 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2351 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2352 {
2353 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
2354 if (rcStrict != VINF_SUCCESS)
2355 return rcStrict;
2356 /** @todo check what VT-x and AMD-V does. */
2357 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2358 }
2359
2360 /* commit */
2361 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2362 if (rcStrict != VINF_SUCCESS)
2363 return rcStrict;
2364 if (enmEffOpSize == IEMMODE_16BIT)
2365 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2366 else
2367 pCtx->rip = uNewRip;
2368 pCtx->cs.Sel = uNewCs;
2369 pCtx->cs.ValidSel = uNewCs;
2370 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2371 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2372 pCtx->cs.u32Limit = cbLimitCs;
2373 pCtx->cs.u64Base = u64Base;
2374 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2375 pCtx->rsp = uNewOuterRsp;
2376 pCtx->ss.Sel = uNewOuterSs;
2377 pCtx->ss.ValidSel = uNewOuterSs;
2378 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2379 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2380 pCtx->ss.u32Limit = cbLimitSs;
2381 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2382 pCtx->ss.u64Base = 0;
2383 else
2384 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2385
2386 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
2387 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2388 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2389 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2390 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2391
2392 /** @todo check if the hidden bits are loaded correctly for 64-bit
2393 * mode. */
2394
2395 if (cbPop)
2396 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2397 pCtx->eflags.Bits.u1RF = 0;
2398
2399 /* Done! */
2400 }
2401 /*
2402 * Return to the same privilege level
2403 */
2404 else
2405 {
2406 /* Limit / canonical check. */
2407 uint64_t u64Base;
2408 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2409
2410 /** @todo Testcase: Is this correct? */
2411 if ( DescCs.Legacy.Gen.u1Long
2412 && IEM_IS_LONG_MODE(pIemCpu) )
2413 {
2414 if (!IEM_IS_CANONICAL(uNewRip))
2415 {
2416 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2417 return iemRaiseNotCanonical(pIemCpu);
2418 }
2419 u64Base = 0;
2420 }
2421 else
2422 {
2423 if (uNewRip > cbLimitCs)
2424 {
2425 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2426 /** @todo: Intel says this is #GP(0)! */
2427 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2428 }
2429 u64Base = X86DESC_BASE(&DescCs.Legacy);
2430 }
2431
2432 /*
2433 * Now set the accessed bit before
2434 * committing the stack pop and the result into
2435 * CS, CSHID and RIP.
2436 */
2437 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2438 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2439 {
2440 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2441 if (rcStrict != VINF_SUCCESS)
2442 return rcStrict;
2443 /** @todo check what VT-x and AMD-V does. */
2444 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2445 }
2446
2447 /* commit */
2448 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2449 if (rcStrict != VINF_SUCCESS)
2450 return rcStrict;
2451 if (enmEffOpSize == IEMMODE_16BIT)
2452 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2453 else
2454 pCtx->rip = uNewRip;
2455 pCtx->cs.Sel = uNewCs;
2456 pCtx->cs.ValidSel = uNewCs;
2457 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2458 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2459 pCtx->cs.u32Limit = cbLimitCs;
2460 pCtx->cs.u64Base = u64Base;
2461 /** @todo check if the hidden bits are loaded correctly for 64-bit
2462 * mode. */
2463 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2464 if (cbPop)
2465 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2466 pCtx->eflags.Bits.u1RF = 0;
2467 }
2468 return VINF_SUCCESS;
2469}
2470
2471
2472/**
2473 * Implements retn.
2474 *
2475 * We're doing this in C because of the \#GP that might be raised if the popped
2476 * program counter is out of bounds.
2477 *
2478 * @param enmEffOpSize The effective operand size.
2479 * @param cbPop The number of bytes of arguments to pop from the
2480 * stack.
2481 */
2482IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2483{
2484 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2485 NOREF(cbInstr);
2486
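 /*
  * Informal note: the near return pops the return IP/EIP/RIP according to the
  * effective operand size, validates it against CS.limit (or canonicality in
  * 64-bit mode), and only then adds cbPop to the new stack pointer before
  * committing.
  */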
2487 /* Fetch the RSP from the stack. */
2488 VBOXSTRICTRC rcStrict;
2489 RTUINT64U NewRip;
2490 RTUINT64U NewRsp;
2491 NewRsp.u = pCtx->rsp;
2492 switch (enmEffOpSize)
2493 {
2494 case IEMMODE_16BIT:
2495 NewRip.u = 0;
2496 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
2497 break;
2498 case IEMMODE_32BIT:
2499 NewRip.u = 0;
2500 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
2501 break;
2502 case IEMMODE_64BIT:
2503 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
2504 break;
2505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2506 }
2507 if (rcStrict != VINF_SUCCESS)
2508 return rcStrict;
2509
2510 /* Check the new RSP before loading it. */
2511 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2512 * of it. The canonical test is performed here and for call. */
2513 if (enmEffOpSize != IEMMODE_64BIT)
2514 {
2515 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2516 {
2517 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2518 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2519 }
2520 }
2521 else
2522 {
2523 if (!IEM_IS_CANONICAL(NewRip.u))
2524 {
2525 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2526 return iemRaiseNotCanonical(pIemCpu);
2527 }
2528 }
2529
2530 /* Apply cbPop */
2531 if (cbPop)
2532 iemRegAddToRspEx(pIemCpu, pCtx, &NewRsp, cbPop);
2533
2534 /* Commit it. */
2535 pCtx->rip = NewRip.u;
2536 pCtx->rsp = NewRsp.u;
2537 pCtx->eflags.Bits.u1RF = 0;
2538
2539 return VINF_SUCCESS;
2540}
2541
2542
2543/**
2544 * Implements enter.
2545 *
2546 * We're doing this in C because the instruction is insane; even in the
2547 * u8NestingLevel=0 case, dealing with the stack is tedious.
2548 *
2549 * @param enmEffOpSize The effective operand size.
2550 */
2551IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2552{
2553 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2554
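 /*
  * Rough outline of what follows: push rBP and remember the old frame pointer;
  * for a non-zero nesting level, copy entries from the old frame and push the
  * new frame pointer; finally load rBP with the new frame pointer and subtract
  * cbFrame from rSP.
  */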
2555 /* Push RBP, saving the old value in TmpRbp. */
2556 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2557 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2558 RTUINT64U NewRbp;
2559 VBOXSTRICTRC rcStrict;
2560 if (enmEffOpSize == IEMMODE_64BIT)
2561 {
2562 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
2563 NewRbp = NewRsp;
2564 }
2565 else if (enmEffOpSize == IEMMODE_32BIT)
2566 {
2567 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
2568 NewRbp = NewRsp;
2569 }
2570 else
2571 {
2572 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
2573 NewRbp = TmpRbp;
2574 NewRbp.Words.w0 = NewRsp.Words.w0;
2575 }
2576 if (rcStrict != VINF_SUCCESS)
2577 return rcStrict;
2578
2579 /* Copy the parameters (aka nesting levels by Intel). */
2580 cParameters &= 0x1f;
2581 if (cParameters > 0)
2582 {
2583 switch (enmEffOpSize)
2584 {
2585 case IEMMODE_16BIT:
2586 if (pCtx->ss.Attr.n.u1DefBig)
2587 TmpRbp.DWords.dw0 -= 2;
2588 else
2589 TmpRbp.Words.w0 -= 2;
2590 do
2591 {
2592 uint16_t u16Tmp;
2593 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
2594 if (rcStrict != VINF_SUCCESS)
2595 break;
2596 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
2597 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2598 break;
2599
2600 case IEMMODE_32BIT:
2601 if (pCtx->ss.Attr.n.u1DefBig)
2602 TmpRbp.DWords.dw0 -= 4;
2603 else
2604 TmpRbp.Words.w0 -= 4;
2605 do
2606 {
2607 uint32_t u32Tmp;
2608 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
2609 if (rcStrict != VINF_SUCCESS)
2610 break;
2611 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
2612 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2613 break;
2614
2615 case IEMMODE_64BIT:
2616 TmpRbp.u -= 8;
2617 do
2618 {
2619 uint64_t u64Tmp;
2620 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
2621 if (rcStrict != VINF_SUCCESS)
2622 break;
2623 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
2624 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2625 break;
2626
2627 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2628 }
2629 if (rcStrict != VINF_SUCCESS)
2630 return VINF_SUCCESS;
2631
2632 /* Push the new RBP */
2633 if (enmEffOpSize == IEMMODE_64BIT)
2634 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
2635 else if (enmEffOpSize == IEMMODE_32BIT)
2636 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
2637 else
2638 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
2639 if (rcStrict != VINF_SUCCESS)
2640 return rcStrict;
2641
2642 }
2643
2644 /* Recalc RSP. */
2645 iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame);
2646
2647 /** @todo Should probe write access at the new RSP according to AMD. */
2648
2649 /* Commit it. */
2650 pCtx->rbp = NewRbp.u;
2651 pCtx->rsp = NewRsp.u;
2652 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2653
2654 return VINF_SUCCESS;
2655}
2656
2657
2658
2659/**
2660 * Implements leave.
2661 *
2662 * We're doing this in C because messing with the stack registers is annoying
2663 * since they depend on SS attributes.
2664 *
2665 * @param enmEffOpSize The effective operand size.
2666 */
2667IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2668{
2669 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2670
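 /*
  * Informal note: LEAVE is effectively "rSP := rBP; pop rBP", where the width
  * of the rSP assignment is governed by the SS D/B attribute and the pop width
  * by the effective operand size, as implemented below.
  */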
2671 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2672 RTUINT64U NewRsp;
2673 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2674 NewRsp.u = pCtx->rbp;
2675 else if (pCtx->ss.Attr.n.u1DefBig)
2676 NewRsp.u = pCtx->ebp;
2677 else
2678 {
2679 /** @todo Check that LEAVE actually preserve the high EBP bits. */
2680 NewRsp.u = pCtx->rsp;
2681 NewRsp.Words.w0 = pCtx->bp;
2682 }
2683
2684 /* Pop RBP according to the operand size. */
2685 VBOXSTRICTRC rcStrict;
2686 RTUINT64U NewRbp;
2687 switch (enmEffOpSize)
2688 {
2689 case IEMMODE_16BIT:
2690 NewRbp.u = pCtx->rbp;
2691 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
2692 break;
2693 case IEMMODE_32BIT:
2694 NewRbp.u = 0;
2695 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
2696 break;
2697 case IEMMODE_64BIT:
2698 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
2699 break;
2700 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2701 }
2702 if (rcStrict != VINF_SUCCESS)
2703 return rcStrict;
2704
2705
2706 /* Commit it. */
2707 pCtx->rbp = NewRbp.u;
2708 pCtx->rsp = NewRsp.u;
2709 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2710
2711 return VINF_SUCCESS;
2712}
2713
2714
2715/**
2716 * Implements int3 and int XX.
2717 *
2718 * @param u8Int The interrupt vector number.
2719 * @param fIsBpInstr Is it the breakpoint instruction.
2720 */
2721IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
2722{
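 /*
  * Informal note: both INT3 and INT imm8 funnel into the common
  * iemRaiseXcptOrInt() path as software interrupts; the only difference here
  * is the IEM_XCPT_FLAGS_BP_INSTR flag passed along for the breakpoint
  * instruction.
  */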
2723 Assert(pIemCpu->cXcptRecursions == 0);
2724 return iemRaiseXcptOrInt(pIemCpu,
2725 cbInstr,
2726 u8Int,
2727 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
2728 0,
2729 0);
2730}
2731
2732
2733/**
2734 * Implements iret for real mode and V8086 mode.
2735 *
2736 * @param enmEffOpSize The effective operand size.
2737 */
2738IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2739{
2740 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2741 X86EFLAGS Efl;
2742 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2743 NOREF(cbInstr);
2744
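 /*
  * Informal sketch of the frame popped below: IP, CS and FLAGS -- three slots
  * of 2 bytes each for a 16-bit operand size or 4 bytes each for 32-bit
  * (6 or 12 bytes in total).
  */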
2745 /*
2746 * iret raises #GP(0) in V8086 mode when IOPL != 3 and VME isn't enabled.
2747 */
2748 if ( Efl.Bits.u1VM
2749 && Efl.Bits.u2IOPL != 3
2750 && !(pCtx->cr4 & X86_CR4_VME))
2751 return iemRaiseGeneralProtectionFault0(pIemCpu);
2752
2753 /*
2754 * Do the stack bits, but don't commit RSP before everything checks
2755 * out right.
2756 */
2757 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2758 VBOXSTRICTRC rcStrict;
2759 RTCPTRUNION uFrame;
2760 uint16_t uNewCs;
2761 uint32_t uNewEip;
2762 uint32_t uNewFlags;
2763 uint64_t uNewRsp;
2764 if (enmEffOpSize == IEMMODE_32BIT)
2765 {
2766 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2767 if (rcStrict != VINF_SUCCESS)
2768 return rcStrict;
2769 uNewEip = uFrame.pu32[0];
2770 if (uNewEip > UINT16_MAX)
2771 return iemRaiseGeneralProtectionFault0(pIemCpu);
2772
2773 uNewCs = (uint16_t)uFrame.pu32[1];
2774 uNewFlags = uFrame.pu32[2];
2775 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2776 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2777 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2778 | X86_EFL_ID;
2779 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
2780 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2781 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2782 }
2783 else
2784 {
2785 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2786 if (rcStrict != VINF_SUCCESS)
2787 return rcStrict;
2788 uNewEip = uFrame.pu16[0];
2789 uNewCs = uFrame.pu16[1];
2790 uNewFlags = uFrame.pu16[2];
2791 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2792 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2793 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2794 /** @todo The intel pseudo code does not indicate what happens to
2795 * reserved flags. We just ignore them. */
2796 /* Ancient CPU adjustments: See iemCImpl_popf. */
2797 if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_286)
2798 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2799 }
2800 /** @todo Check how this is supposed to work if sp=0xfffe. */
2801 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2802 uNewCs, uNewEip, uNewFlags, uNewRsp));
2803
2804 /*
2805 * Check the limit of the new EIP.
2806 */
2807 /** @todo Only the AMD pseudo code checks the limit here, what's
2808 * right? */
2809 if (uNewEip > pCtx->cs.u32Limit)
2810 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2811
2812 /*
2813 * V8086 checks and flag adjustments
2814 */
2815 if (Efl.Bits.u1VM)
2816 {
2817 if (Efl.Bits.u2IOPL == 3)
2818 {
2819 /* Preserve IOPL and clear RF. */
2820 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2821 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2822 }
2823 else if ( enmEffOpSize == IEMMODE_16BIT
2824 && ( !(uNewFlags & X86_EFL_IF)
2825 || !Efl.Bits.u1VIP )
2826 && !(uNewFlags & X86_EFL_TF) )
2827 {
2828 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2829 uNewFlags &= ~X86_EFL_VIF;
2830 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2831 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2832 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2833 }
2834 else
2835 return iemRaiseGeneralProtectionFault0(pIemCpu);
2836 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2837 }
2838
2839 /*
2840 * Commit the operation.
2841 */
2842 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2843 if (rcStrict != VINF_SUCCESS)
2844 return rcStrict;
2845#ifdef DBGFTRACE_ENABLED
2846 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2847 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2848#endif
2849
2850 pCtx->rip = uNewEip;
2851 pCtx->cs.Sel = uNewCs;
2852 pCtx->cs.ValidSel = uNewCs;
2853 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2854 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2855 /** @todo do we load attribs and limit as well? */
2856 Assert(uNewFlags & X86_EFL_1);
2857 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2858
2859 return VINF_SUCCESS;
2860}
2861
2862
2863/**
2864 * Loads a segment register when entering V8086 mode.
2865 *
2866 * @param pSReg The segment register.
2867 * @param uSeg The segment to load.
2868 */
2869static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2870{
2871 pSReg->Sel = uSeg;
2872 pSReg->ValidSel = uSeg;
2873 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2874 pSReg->u64Base = (uint32_t)uSeg << 4;
2875 pSReg->u32Limit = 0xffff;
2876 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2877 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2878 * IRET'ing to V8086. */
2879}
2880
2881
2882/**
2883 * Implements iret for protected mode returning to V8086 mode.
2884 *
2885 * @param pCtx Pointer to the CPU context.
2886 * @param uNewEip The new EIP.
2887 * @param uNewCs The new CS.
2888 * @param uNewFlags The new EFLAGS.
2889 * @param uNewRsp The RSP after the initial IRET frame.
2890 *
2891 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2892 */
2893IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2894 uint32_t, uNewFlags, uint64_t, uNewRsp)
2895{
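 /*
  * Informal sketch: the basic EIP/CS/EFLAGS part of the frame has already been
  * read by the caller; what remains for a return to V8086 mode are six more
  * dwords -- ESP, SS, ES, DS, FS and GS -- which are popped here before the
  * segment registers are reloaded real-mode style.
  */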
2896 /*
2897 * Pop the V8086 specific frame bits off the stack.
2898 */
2899 VBOXSTRICTRC rcStrict;
2900 RTCPTRUNION uFrame;
2901 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2902 if (rcStrict != VINF_SUCCESS)
2903 return rcStrict;
2904 uint32_t uNewEsp = uFrame.pu32[0];
2905 uint16_t uNewSs = uFrame.pu32[1];
2906 uint16_t uNewEs = uFrame.pu32[2];
2907 uint16_t uNewDs = uFrame.pu32[3];
2908 uint16_t uNewFs = uFrame.pu32[4];
2909 uint16_t uNewGs = uFrame.pu32[5];
2910 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2911 if (rcStrict != VINF_SUCCESS)
2912 return rcStrict;
2913
2914 /*
2915 * Commit the operation.
2916 */
2917 uNewFlags &= X86_EFL_LIVE_MASK;
2918 uNewFlags |= X86_EFL_RA1_MASK;
2919#ifdef DBGFTRACE_ENABLED
2920 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2921 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2922#endif
2923
2924 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2925 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2926 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2927 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2928 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2929 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2930 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2931 pCtx->rip = (uint16_t)uNewEip;
2932 pCtx->rsp = uNewEsp; /** @todo check this out! */
2933 pIemCpu->uCpl = 3;
2934
2935 return VINF_SUCCESS;
2936}
2937
2938
2939/**
2940 * Implements iret for protected mode returning via a nested task.
2941 *
2942 * @param enmEffOpSize The effective operand size.
2943 */
2944IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2945{
2946 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
2947#ifndef IEM_IMPLEMENTS_TASKSWITCH
2948 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2949#else
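 /*
  * Informal note: the previous task link lives in the first word of the
  * current TSS (offset 0), which is why the fetch below reads a 16-bit
  * selector straight from tr.u64Base.
  */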
2950 /*
2951 * Read the segment selector in the link-field of the current TSS.
2952 */
2953 RTSEL uSelRet;
2954 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2955 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
2956 if (rcStrict != VINF_SUCCESS)
2957 return rcStrict;
2958
2959 /*
2960 * Fetch the returning task's TSS descriptor from the GDT.
2961 */
2962 if (uSelRet & X86_SEL_LDT)
2963 {
2964 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
2965 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet);
2966 }
2967
2968 IEMSELDESC TssDesc;
2969 rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelRet, X86_XCPT_GP);
2970 if (rcStrict != VINF_SUCCESS)
2971 return rcStrict;
2972
2973 if (TssDesc.Legacy.Gate.u1DescType)
2974 {
2975 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
2976 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2977 }
2978
2979 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
2980 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2981 {
2982 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
2983 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 if (!TssDesc.Legacy.Gate.u1Present)
2987 {
2988 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
2989 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2990 }
2991
2992 uint32_t uNextEip = pCtx->eip + cbInstr;
2993 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
2994 0 /* uCr2 */, uSelRet, &TssDesc);
2995#endif
2996}
2997
2998
2999/**
3000 * Implements iret for protected mode.
3001 *
3002 * @param enmEffOpSize The effective operand size.
3003 */
3004IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3005{
3006 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3007 NOREF(cbInstr);
3008 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3009
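 /*
  * Informal overview of the paths below: a set NT flag means a task return;
  * otherwise the EIP, CS and FLAGS slots are popped, and when CS.RPL differs
  * from the current CPL an additional ESP and SS pair is popped and loaded
  * together with the privilege level change.
  */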
3010 /*
3011 * Nested task return.
3012 */
3013 if (pCtx->eflags.Bits.u1NT)
3014 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3015
3016 /*
3017 * Normal return.
3018 *
3019 * Do the stack bits, but don't commit RSP before everything checks
3020 * out right.
3021 */
3022 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3023 VBOXSTRICTRC rcStrict;
3024 RTCPTRUNION uFrame;
3025 uint16_t uNewCs;
3026 uint32_t uNewEip;
3027 uint32_t uNewFlags;
3028 uint64_t uNewRsp;
3029 if (enmEffOpSize == IEMMODE_32BIT)
3030 {
3031 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
3032 if (rcStrict != VINF_SUCCESS)
3033 return rcStrict;
3034 uNewEip = uFrame.pu32[0];
3035 uNewCs = (uint16_t)uFrame.pu32[1];
3036 uNewFlags = uFrame.pu32[2];
3037 }
3038 else
3039 {
3040 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
3041 if (rcStrict != VINF_SUCCESS)
3042 return rcStrict;
3043 uNewEip = uFrame.pu16[0];
3044 uNewCs = uFrame.pu16[1];
3045 uNewFlags = uFrame.pu16[2];
3046 }
3047 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3048 if (rcStrict != VINF_SUCCESS)
3049 return rcStrict;
3050 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n", uNewCs, uNewEip, uNewFlags, uNewRsp));
3051
3052 /*
3053 * We're hopefully not returning to V8086 mode...
3054 */
3055 if ( (uNewFlags & X86_EFL_VM)
3056 && pIemCpu->uCpl == 0)
3057 {
3058 Assert(enmEffOpSize == IEMMODE_32BIT);
3059 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3060 }
3061
3062 /*
3063 * Protected mode.
3064 */
3065 /* Read the CS descriptor. */
3066 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3067 {
3068 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3069 return iemRaiseGeneralProtectionFault0(pIemCpu);
3070 }
3071
3072 IEMSELDESC DescCS;
3073 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3074 if (rcStrict != VINF_SUCCESS)
3075 {
3076 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3077 return rcStrict;
3078 }
3079
3080 /* Must be a code descriptor. */
3081 if (!DescCS.Legacy.Gen.u1DescType)
3082 {
3083 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3084 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3085 }
3086 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3087 {
3088 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3089 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3090 }
3091
3092#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3093 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3094 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3095 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3096 {
3097 if ((uNewCs & X86_SEL_RPL) == 1)
3098 {
3099 if ( pIemCpu->uCpl == 0
3100 && ( !EMIsRawRing1Enabled(pVM)
3101 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3102 {
3103 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3104 uNewCs &= X86_SEL_MASK_OFF_RPL;
3105 }
3106# ifdef LOG_ENABLED
3107 else if (pIemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3108 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3109# endif
3110 }
3111 else if ( (uNewCs & X86_SEL_RPL) == 2
3112 && EMIsRawRing1Enabled(pVM)
3113 && pIemCpu->uCpl <= 1)
3114 {
3115 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3116 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3117 }
3118 }
3119#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3120
3121
3122 /* Privilege checks. */
3123 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3124 {
3125 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3126 {
3127 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3129 }
3130 }
3131 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3132 {
3133 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3134 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3135 }
3136 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3137 {
3138 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
3139 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3140 }
3141
3142 /* Present? */
3143 if (!DescCS.Legacy.Gen.u1Present)
3144 {
3145 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3146 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3147 }
3148
3149 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3150
3151 /*
3152 * Return to outer level?
3153 */
3154 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
3155 {
3156 uint16_t uNewSS;
3157 uint32_t uNewESP;
3158 if (enmEffOpSize == IEMMODE_32BIT)
3159 {
3160 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
3161 if (rcStrict != VINF_SUCCESS)
3162 return rcStrict;
3163/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3164 * 16 or 32 bits get loaded into SP depends on the D/B
3165 * bit of the popped SS selector, it turns out. */
3166 uNewESP = uFrame.pu32[0];
3167 uNewSS = (uint16_t)uFrame.pu32[1];
3168 }
3169 else
3170 {
3171 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
3172 if (rcStrict != VINF_SUCCESS)
3173 return rcStrict;
3174 uNewESP = uFrame.pu16[0];
3175 uNewSS = uFrame.pu16[1];
3176 }
3177 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3178 if (rcStrict != VINF_SUCCESS)
3179 return rcStrict;
3180 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3181
3182 /* Read the SS descriptor. */
3183 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3184 {
3185 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3186 return iemRaiseGeneralProtectionFault0(pIemCpu);
3187 }
3188
3189 IEMSELDESC DescSS;
3190 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3191 if (rcStrict != VINF_SUCCESS)
3192 {
3193 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3194 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3195 return rcStrict;
3196 }
3197
3198 /* Privilege checks. */
3199 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3200 {
3201 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3203 }
3204 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3205 {
3206 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3207 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3208 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3209 }
3210
3211 /* Must be a writeable data segment descriptor. */
3212 if (!DescSS.Legacy.Gen.u1DescType)
3213 {
3214 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3215 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3216 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3217 }
3218 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3219 {
3220 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3221 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3222 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3223 }
3224
3225 /* Present? */
3226 if (!DescSS.Legacy.Gen.u1Present)
3227 {
3228 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3229 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
3230 }
3231
3232 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3233
3234 /* Check EIP. */
3235 if (uNewEip > cbLimitCS)
3236 {
3237 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3238 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3239 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3240 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3241 }
3242
3243 /*
3244 * Commit the changes, marking CS and SS accessed first since
3245 * that may fail.
3246 */
3247 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3248 {
3249 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3250 if (rcStrict != VINF_SUCCESS)
3251 return rcStrict;
3252 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3253 }
3254 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3255 {
3256 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3257 if (rcStrict != VINF_SUCCESS)
3258 return rcStrict;
3259 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3260 }
3261
3262 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3263 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3264 if (enmEffOpSize != IEMMODE_16BIT)
3265 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3266 if (pIemCpu->uCpl == 0)
3267 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3268 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3269 fEFlagsMask |= X86_EFL_IF;
3270 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
3271 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3272 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3273 fEFlagsNew &= ~fEFlagsMask;
3274 fEFlagsNew |= uNewFlags & fEFlagsMask;
3275#ifdef DBGFTRACE_ENABLED
3276 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3277 pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3278 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3279#endif
3280
3281 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3282 pCtx->rip = uNewEip;
3283 pCtx->cs.Sel = uNewCs;
3284 pCtx->cs.ValidSel = uNewCs;
3285 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3286 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3287 pCtx->cs.u32Limit = cbLimitCS;
3288 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3289 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3290 if (!pCtx->ss.Attr.n.u1DefBig)
3291 pCtx->sp = (uint16_t)uNewESP;
3292 else
3293 pCtx->rsp = uNewESP;
3294 pCtx->ss.Sel = uNewSS;
3295 pCtx->ss.ValidSel = uNewSS;
3296 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3297 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3298 pCtx->ss.u32Limit = cbLimitSs;
3299 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3300
3301 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
3302 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3303 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3304 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3305 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3306
3307 /* Done! */
3308
3309 }
3310 /*
3311 * Return to the same level.
3312 */
3313 else
3314 {
3315 /* Check EIP. */
3316 if (uNewEip > cbLimitCS)
3317 {
3318 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3319 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3320 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3321 }
3322
3323 /*
3324 * Commit the changes, marking CS first since it may fail.
3325 */
3326 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3327 {
3328 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3329 if (rcStrict != VINF_SUCCESS)
3330 return rcStrict;
3331 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3332 }
3333
3334 X86EFLAGS NewEfl;
3335 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
3336 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3337 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3338 if (enmEffOpSize != IEMMODE_16BIT)
3339 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3340 if (pIemCpu->uCpl == 0)
3341 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3342 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
3343 fEFlagsMask |= X86_EFL_IF;
3344 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
3345 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3346 NewEfl.u &= ~fEFlagsMask;
3347 NewEfl.u |= fEFlagsMask & uNewFlags;
3348#ifdef DBGFTRACE_ENABLED
3349 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3350 pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,
3351 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3352#endif
3353
3354 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
3355 pCtx->rip = uNewEip;
3356 pCtx->cs.Sel = uNewCs;
3357 pCtx->cs.ValidSel = uNewCs;
3358 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3359 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3360 pCtx->cs.u32Limit = cbLimitCS;
3361 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3362 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3363 pCtx->rsp = uNewRsp;
3364 /* Done! */
3365 }
3366 return VINF_SUCCESS;
3367}
3368
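/*
 * Illustrative sketch, not part of the build: the EFLAGS merge rule used by
 * the protected mode iret paths above.  Only the bits in the mask are taken
 * from the popped value; the mask depends on the effective operand size and
 * on the CPL/IOPL relationship.  The standalone helper below is made up for
 * illustration; the X86_EFL_* constants are the ones used above (the 386
 * special case that strips AC/ID/VIF/VIP is left out for brevity).
 */
#if 0
static uint32_t iemSketchIretMergeEFlags(uint32_t fOldEfl, uint32_t fPoppedEfl,
                                         IEMMODE enmEffOpSize, uint8_t uCpl, uint8_t uIopl)
{
    uint32_t fMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                   | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
    if (enmEffOpSize != IEMMODE_16BIT)  /* 32-bit frames also carry these. */
        fMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
    if (uCpl == 0)                      /* ring-0 may also change IF, IOPL and the VME bits. */
        fMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP;
    else if (uCpl <= uIopl)             /* otherwise IF only if IOPL permits it. */
        fMask |= X86_EFL_IF;
    return (fOldEfl & ~fMask) | (fPoppedEfl & fMask);
}
#endif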
3369
3370/**
3371 * Implements iret for long mode
3372 *
3373 * @param enmEffOpSize The effective operand size.
3374 */
3375IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3376{
3377 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3378 NOREF(cbInstr);
3379
3380 /*
3381 * Nested task return is not supported in long mode.
3382 */
3383 if (pCtx->eflags.Bits.u1NT)
3384 {
3385 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3386 return iemRaiseGeneralProtectionFault0(pIemCpu);
3387 }
3388
3389 /*
3390 * Normal return.
3391 *
3392 * Do the stack bits, but don't commit RSP before everything checks
3393 * out right.
3394 */
3395 VBOXSTRICTRC rcStrict;
3396 RTCPTRUNION uFrame;
3397 uint64_t uNewRip;
3398 uint16_t uNewCs;
3399 uint16_t uNewSs;
3400 uint32_t uNewFlags;
3401 uint64_t uNewRsp;
3402 if (enmEffOpSize == IEMMODE_64BIT)
3403 {
3404 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
3405 if (rcStrict != VINF_SUCCESS)
3406 return rcStrict;
3407 uNewRip = uFrame.pu64[0];
3408 uNewCs = (uint16_t)uFrame.pu64[1];
3409 uNewFlags = (uint32_t)uFrame.pu64[2];
3410 uNewRsp = uFrame.pu64[3];
3411 uNewSs = (uint16_t)uFrame.pu64[4];
3412 }
3413 else if (enmEffOpSize == IEMMODE_32BIT)
3414 {
3415 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
3416 if (rcStrict != VINF_SUCCESS)
3417 return rcStrict;
3418 uNewRip = uFrame.pu32[0];
3419 uNewCs = (uint16_t)uFrame.pu32[1];
3420 uNewFlags = uFrame.pu32[2];
3421 uNewRsp = uFrame.pu32[3];
3422 uNewSs = (uint16_t)uFrame.pu32[4];
3423 }
3424 else
3425 {
3426 Assert(enmEffOpSize == IEMMODE_16BIT);
3427 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 uNewRip = uFrame.pu16[0];
3431 uNewCs = uFrame.pu16[1];
3432 uNewFlags = uFrame.pu16[2];
3433 uNewRsp = uFrame.pu16[3];
3434 uNewSs = uFrame.pu16[4];
3435 }
3436 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3437 if (rcStrict != VINF_SUCCESS)
3438 return rcStrict;
3439 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3440
3441 /*
3442 * Check stuff.
3443 */
3444 /* Read the CS descriptor. */
3445 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3446 {
3447 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3448 return iemRaiseGeneralProtectionFault0(pIemCpu);
3449 }
3450
3451 IEMSELDESC DescCS;
3452 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3453 if (rcStrict != VINF_SUCCESS)
3454 {
3455 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3456 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3457 return rcStrict;
3458 }
3459
3460 /* Must be a code descriptor. */
3461 if ( !DescCS.Legacy.Gen.u1DescType
3462 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3463 {
3464        Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment DescType=%u Type=%#x -> #GP\n",
3465 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3466 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3467 }
3468
3469 /* Privilege checks. */
3470 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3471 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3472 {
3473 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3474 {
3475 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3476 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3477 }
3478 }
3479 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3480 {
3481 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3482 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3483 }
3484 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3485 {
3486 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pIemCpu->uCpl));
3487 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3488 }
3489
3490 /* Present? */
3491 if (!DescCS.Legacy.Gen.u1Present)
3492 {
3493 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3494 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3495 }
3496
3497 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3498
3499 /* Read the SS descriptor. */
3500 IEMSELDESC DescSS;
3501 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3502 {
3503 if ( !DescCS.Legacy.Gen.u1Long
3504 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3505 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3506 {
3507 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3508 return iemRaiseGeneralProtectionFault0(pIemCpu);
3509 }
3510 DescSS.Legacy.u = 0;
3511 }
3512 else
3513 {
3514 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3515 if (rcStrict != VINF_SUCCESS)
3516 {
3517 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3518 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3519 return rcStrict;
3520 }
3521 }
3522
3523 /* Privilege checks. */
3524 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3525 {
3526 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3527 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3528 }
3529
3530 uint32_t cbLimitSs;
3531 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3532 cbLimitSs = UINT32_MAX;
3533 else
3534 {
3535 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3536 {
3537 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3538 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3539 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3540 }
3541
3542 /* Must be a writeable data segment descriptor. */
3543 if (!DescSS.Legacy.Gen.u1DescType)
3544 {
3545 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3546 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3547 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3548 }
3549 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3550 {
3551 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3552 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3553 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3554 }
3555
3556 /* Present? */
3557 if (!DescSS.Legacy.Gen.u1Present)
3558 {
3559 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3560 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
3561 }
3562 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3563 }
3564
3565 /* Check EIP. */
3566 if (DescCS.Legacy.Gen.u1Long)
3567 {
3568 if (!IEM_IS_CANONICAL(uNewRip))
3569 {
3570 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3571 uNewCs, uNewRip, uNewSs, uNewRsp));
3572 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3573 }
3574 }
3575 else
3576 {
3577 if (uNewRip > cbLimitCS)
3578 {
3579 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3580 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3581 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3582 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3583 }
3584 }
3585
3586 /*
3587 * Commit the changes, marking CS and SS accessed first since
3588 * that may fail.
3589 */
3590 /** @todo where exactly are these actually marked accessed by a real CPU? */
3591 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3592 {
3593 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3594 if (rcStrict != VINF_SUCCESS)
3595 return rcStrict;
3596 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3597 }
3598 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3599 {
3600 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
3601 if (rcStrict != VINF_SUCCESS)
3602 return rcStrict;
3603 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3604 }
3605
3606 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3607 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3608 if (enmEffOpSize != IEMMODE_16BIT)
3609 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3610 if (pIemCpu->uCpl == 0)
3611 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3612 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3613 fEFlagsMask |= X86_EFL_IF;
3614 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3615 fEFlagsNew &= ~fEFlagsMask;
3616 fEFlagsNew |= uNewFlags & fEFlagsMask;
3617#ifdef DBGFTRACE_ENABLED
3618 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3619 pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3620#endif
3621
3622 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3623 pCtx->rip = uNewRip;
3624 pCtx->cs.Sel = uNewCs;
3625 pCtx->cs.ValidSel = uNewCs;
3626 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3627 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3628 pCtx->cs.u32Limit = cbLimitCS;
3629 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3630 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3631 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3632 pCtx->rsp = uNewRsp;
3633 else
3634 pCtx->sp = (uint16_t)uNewRsp;
3635 pCtx->ss.Sel = uNewSs;
3636 pCtx->ss.ValidSel = uNewSs;
3637 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3638 {
3639 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3640 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3641 pCtx->ss.u32Limit = UINT32_MAX;
3642 pCtx->ss.u64Base = 0;
3643 Log2(("iretq new SS: NULL\n"));
3644 }
3645 else
3646 {
3647 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3648 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3649 pCtx->ss.u32Limit = cbLimitSs;
3650 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3651 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3652 }
3653
3654 if (pIemCpu->uCpl != uNewCpl)
3655 {
3656 pIemCpu->uCpl = uNewCpl;
3657 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
3658 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
3659 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
3660 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
3661 }
3662
3663 return VINF_SUCCESS;
3664}
3665
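/*
 * Illustrative sketch, not part of the build: the five slot frame popped by
 * the 64-bit iret worker above.  The slot width follows the effective
 * operand size (8, 4 or 2 bytes); the decoder below only shows the 64-bit
 * layout and is made up for illustration.
 */
#if 0
static void iemSketchDecodeIretqFrame(uint64_t const *pauFrame /* 5 qwords */, uint64_t *puRip,
                                      uint16_t *puCs, uint32_t *pfEfl, uint64_t *puRsp, uint16_t *puSs)
{
    *puRip = pauFrame[0];               /* return RIP */
    *puCs  = (uint16_t)pauFrame[1];     /* return CS, upper bits ignored */
    *pfEfl = (uint32_t)pauFrame[2];     /* RFLAGS image */
    *puRsp = pauFrame[3];               /* outer RSP */
    *puSs  = (uint16_t)pauFrame[4];     /* outer SS */
}
#endif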
3666
3667/**
3668 * Implements iret.
3669 *
3670 * @param enmEffOpSize The effective operand size.
3671 */
3672IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3673{
3674 /*
3675 * First, clear NMI blocking, if any, before causing any exceptions.
3676 */
3677 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3678 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3679
3680 /*
3681 * Call a mode specific worker.
3682 */
3683 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3684 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3685 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3686 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3687 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3688}
3689
3690
3691/**
3692 * Implements SYSCALL (AMD and Intel64).
3695 */
3696IEM_CIMPL_DEF_0(iemCImpl_syscall)
3697{
3698 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3699
3700 /*
3701 * Check preconditions.
3702 *
3703 * Note that CPUs described in the documentation may load a few odd values
3704 * into CS and SS compared to what we allow here. This has yet to be checked on real
3705 * hardware.
3706 */
3707 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3708 {
3709 Log(("syscall: Not enabled in EFER -> #UD\n"));
3710 return iemRaiseUndefinedOpcode(pIemCpu);
3711 }
3712 if (!(pCtx->cr0 & X86_CR0_PE))
3713 {
3714 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3715 return iemRaiseGeneralProtectionFault0(pIemCpu);
3716 }
3717 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3718 {
3719 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3720 return iemRaiseUndefinedOpcode(pIemCpu);
3721 }
3722
3723 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3724 /** @todo what about LDT selectors? Shouldn't matter, really. */
3725 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3726 uint16_t uNewSs = uNewCs + 8;
3727 if (uNewCs == 0 || uNewSs == 0)
3728 {
3729 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3730 return iemRaiseGeneralProtectionFault0(pIemCpu);
3731 }
3732
3733 /* Long mode and legacy mode differs. */
3734 if (CPUMIsGuestInLongModeEx(pCtx))
3735 {
3736        uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3737
3738 /* This test isn't in the docs, but I'm not trusting the guys writing
3739 the MSRs to have validated the values as canonical like they should. */
3740 if (!IEM_IS_CANONICAL(uNewRip))
3741 {
3742            Log(("syscall: New RIP %#RX64 from LSTAR/CSTAR is not canonical -> #UD\n", uNewRip));
3743 return iemRaiseUndefinedOpcode(pIemCpu);
3744 }
3745
3746 /*
3747 * Commit it.
3748 */
3749        Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3750 pCtx->rcx = pCtx->rip + cbInstr;
3751 pCtx->rip = uNewRip;
3752
3753 pCtx->rflags.u &= ~X86_EFL_RF;
3754 pCtx->r11 = pCtx->rflags.u;
3755 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3756 pCtx->rflags.u |= X86_EFL_1;
3757
3758 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3759 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3760 }
3761 else
3762 {
3763 /*
3764 * Commit it.
3765 */
3766 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3767             pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3768 pCtx->rcx = pCtx->eip + cbInstr;
3769 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3770 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3771
3772 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3773 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3774 }
3775 pCtx->cs.Sel = uNewCs;
3776 pCtx->cs.ValidSel = uNewCs;
3777 pCtx->cs.u64Base = 0;
3778 pCtx->cs.u32Limit = UINT32_MAX;
3779 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3780
3781 pCtx->ss.Sel = uNewSs;
3782 pCtx->ss.ValidSel = uNewSs;
3783 pCtx->ss.u64Base = 0;
3784 pCtx->ss.u32Limit = UINT32_MAX;
3785 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3786
3787 return VINF_SUCCESS;
3788}
3789
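/*
 * Illustrative sketch, not part of the build: how the SYSCALL code above
 * derives the new CS and SS selectors from MSR_STAR.  The shift matches the
 * MSR_K6_STAR_SYSCALL_CS_SS_SHIFT usage above; the helper itself is made up
 * for illustration.
 */
#if 0
static void iemSketchSyscallSelectors(uint64_t uStarMsr, uint16_t *puCs, uint16_t *puSs)
{
    *puCs = (uint16_t)(uStarMsr >> 32) & X86_SEL_MASK_OFF_RPL;  /* STAR[47:32], RPL forced to 0. */
    *puSs = *puCs + 8;                                          /* SS is the next descriptor. */
}
#endif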
3790
3791/**
3792 * Implements SYSRET (AMD and Intel64).
3793 */
3794IEM_CIMPL_DEF_0(iemCImpl_sysret)
3796{
3797 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3798
3799 /*
3800 * Check preconditions.
3801 *
3802 * Note that CPUs described in the documentation may load a few odd values
3803 * into CS and SS compared to what we allow here. This has yet to be checked on real
3804 * hardware.
3805 */
3806 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3807 {
3808 Log(("sysret: Not enabled in EFER -> #UD\n"));
3809 return iemRaiseUndefinedOpcode(pIemCpu);
3810 }
3811 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3812 {
3813 Log(("sysret: Only available in long mode on intel -> #UD\n"));
3814 return iemRaiseUndefinedOpcode(pIemCpu);
3815 }
3816 if (!(pCtx->cr0 & X86_CR0_PE))
3817 {
3818 Log(("sysret: Protected mode is required -> #GP(0)\n"));
3819 return iemRaiseGeneralProtectionFault0(pIemCpu);
3820 }
3821 if (pIemCpu->uCpl != 0)
3822 {
3823 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl));
3824 return iemRaiseGeneralProtectionFault0(pIemCpu);
3825 }
3826
3827 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
3828 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3829 uint16_t uNewSs = uNewCs + 8;
3830 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3831 uNewCs += 16;
3832 if (uNewCs == 0 || uNewSs == 0)
3833 {
3834 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3835 return iemRaiseGeneralProtectionFault0(pIemCpu);
3836 }
3837
3838 /*
3839 * Commit it.
3840 */
3841 if (CPUMIsGuestInLongModeEx(pCtx))
3842 {
3843 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3844 {
3845 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
3846                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
3847            /* Note! We disregard the intel manual regarding the RCX canonical
3848 check, ask intel+xen why AMD doesn't do it. */
3849 pCtx->rip = pCtx->rcx;
3850 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3851 | (3 << X86DESCATTR_DPL_SHIFT);
3852 }
3853 else
3854 {
3855 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
3856                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
3857 pCtx->rip = pCtx->ecx;
3858 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3859 | (3 << X86DESCATTR_DPL_SHIFT);
3860 }
3861 /** @todo testcase: See what kind of flags we can make SYSRET restore and
3862 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
3863 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
3864 pCtx->rflags.u |= X86_EFL_1;
3865 }
3866 else
3867 {
3868        Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
3869 pCtx->rip = pCtx->rcx;
3870 pCtx->rflags.u |= X86_EFL_IF;
3871 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3872 | (3 << X86DESCATTR_DPL_SHIFT);
3873 }
3874 pCtx->cs.Sel = uNewCs | 3;
3875 pCtx->cs.ValidSel = uNewCs | 3;
3876 pCtx->cs.u64Base = 0;
3877 pCtx->cs.u32Limit = UINT32_MAX;
3878 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3879
3880 pCtx->ss.Sel = uNewSs | 3;
3881 pCtx->ss.ValidSel = uNewSs | 3;
3882 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3883    /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
3884 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
3885 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
3886 * on sysret. */
3887
3888 return VINF_SUCCESS;
3889}
3890
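/*
 * Illustrative sketch, not part of the build: the SYSRET selector derivation
 * used above.  CS comes from STAR[63:48], a 64-bit return uses the descriptor
 * 16 bytes further up, and both selectors get RPL=3.  The helper is made up
 * for illustration.
 */
#if 0
static void iemSketchSysretSelectors(uint64_t uStarMsr, bool f64BitOp, uint16_t *puCs, uint16_t *puSs)
{
    uint16_t const uCsBase = (uint16_t)(uStarMsr >> 48) & X86_SEL_MASK_OFF_RPL;
    *puSs = (uint16_t)(uCsBase + 8) | 3;                    /* SS always follows the 32-bit CS. */
    *puCs = (uint16_t)(uCsBase + (f64BitOp ? 16 : 0)) | 3;  /* 64-bit CS sits another 16 bytes up. */
}
#endif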
3891
3892/**
3893 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
3894 *
3895 * @param iSegReg The segment register number (valid).
3896 * @param uSel The new selector value.
3897 */
3898IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
3899{
3900 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3901 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
3902 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
3903
3904 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
3905
3906 /*
3907 * Real mode and V8086 mode are easy.
3908 */
3909 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3910 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3911 {
3912 *pSel = uSel;
3913 pHid->u64Base = (uint32_t)uSel << 4;
3914 pHid->ValidSel = uSel;
3915 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3916#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
3917 /** @todo Does the CPU actually load limits and attributes in the
3918 * real/V8086 mode segment load case? It doesn't for CS in far
3919 * jumps... Affects unreal mode. */
3920 pHid->u32Limit = 0xffff;
3921 pHid->Attr.u = 0;
3922 pHid->Attr.n.u1Present = 1;
3923 pHid->Attr.n.u1DescType = 1;
3924 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
3925 ? X86_SEL_TYPE_RW
3926 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
3927#endif
3928 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3929 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3930 return VINF_SUCCESS;
3931 }
3932
3933 /*
3934 * Protected mode.
3935 *
3936 * Check if it's a null segment selector value first, that's OK for DS, ES,
3937 * FS and GS. If not null, then we have to load and parse the descriptor.
3938 */
3939 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3940 {
3941 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
3942 if (iSegReg == X86_SREG_SS)
3943 {
3944 /* In 64-bit kernel mode, the stack can be 0 because of the way
3945               interrupts are dispatched. AMD seems to have a slightly more
3946 relaxed relationship to SS.RPL than intel does. */
3947 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
3948 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
3949 || pIemCpu->uCpl > 2
3950 || ( uSel != pIemCpu->uCpl
3951 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
3952 {
3953 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
3954 return iemRaiseGeneralProtectionFault0(pIemCpu);
3955 }
3956 }
3957
3958 *pSel = uSel; /* Not RPL, remember :-) */
3959 iemHlpLoadNullDataSelectorProt(pIemCpu, pHid, uSel);
3960 if (iSegReg == X86_SREG_SS)
3961 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
3962
3963 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
3964 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3965
3966 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3967 return VINF_SUCCESS;
3968 }
3969
3970 /* Fetch the descriptor. */
3971 IEMSELDESC Desc;
3972 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
3973 if (rcStrict != VINF_SUCCESS)
3974 return rcStrict;
3975
3976 /* Check GPs first. */
3977 if (!Desc.Legacy.Gen.u1DescType)
3978 {
3979 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
3980 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3981 }
3982 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
3983 {
3984 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3985 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3986 {
3987 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
3988 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3989 }
3990 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
3991 {
3992 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
3993 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3994 }
3995 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3996 {
3997 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3998 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3999 }
4000 }
4001 else
4002 {
4003 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4004 {
4005 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4006 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4007 }
4008 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4009 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4010 {
4011#if 0 /* this is what intel says. */
4012 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4013 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4014 {
4015 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4016 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4017 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4018 }
4019#else /* this is what makes more sense. */
4020 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4021 {
4022 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4023 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4024 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4025 }
4026 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4027 {
4028 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4029 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4030 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4031 }
4032#endif
4033 }
4034 }
4035
4036 /* Is it there? */
4037 if (!Desc.Legacy.Gen.u1Present)
4038 {
4039 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4040 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4041 }
4042
4043 /* The base and limit. */
4044 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4045 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4046
4047 /*
4048 * Ok, everything checked out fine. Now set the accessed bit before
4049 * committing the result into the registers.
4050 */
4051 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4052 {
4053 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4054 if (rcStrict != VINF_SUCCESS)
4055 return rcStrict;
4056 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4057 }
4058
4059 /* commit */
4060 *pSel = uSel;
4061 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4062 pHid->u32Limit = cbLimit;
4063 pHid->u64Base = u64Base;
4064 pHid->ValidSel = uSel;
4065 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4066
4067 /** @todo check if the hidden bits are loaded correctly for 64-bit
4068 * mode. */
4069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
4070
4071 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
4072 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4073 return VINF_SUCCESS;
4074}
4075
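/*
 * Illustrative sketch, not part of the build: the real/V8086 mode branch of
 * the segment load above reduces to the classic base = selector * 16 rule,
 * leaving the limit and attributes alone (see the AMD note in the #if 0 block
 * above).  The helper is made up for illustration.
 */
#if 0
static uint64_t iemSketchRealModeSegBase(uint16_t uSel)
{
    return (uint32_t)uSel << 4;   /* e.g. selector 0xb800 -> base 0x000b8000 */
}
#endif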
4076
4077/**
4078 * Implements 'mov SReg, r/m'.
4079 *
4080 * @param iSegReg The segment register number (valid).
4081 * @param uSel The new selector value.
4082 */
4083IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4084{
4085 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4086 if (rcStrict == VINF_SUCCESS)
4087 {
4088 if (iSegReg == X86_SREG_SS)
4089 {
4090 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4091 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4092 }
4093 }
4094 return rcStrict;
4095}
4096
4097
4098/**
4099 * Implements 'pop SReg'.
4100 *
4101 * @param iSegReg The segment register number (valid).
4102 * @param enmEffOpSize The effective operand size (valid).
4103 */
4104IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4105{
4106 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4107 VBOXSTRICTRC rcStrict;
4108
4109 /*
4110 * Read the selector off the stack and join paths with mov ss, reg.
4111 */
4112 RTUINT64U TmpRsp;
4113 TmpRsp.u = pCtx->rsp;
4114 switch (enmEffOpSize)
4115 {
4116 case IEMMODE_16BIT:
4117 {
4118 uint16_t uSel;
4119 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4120 if (rcStrict == VINF_SUCCESS)
4121 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4122 break;
4123 }
4124
4125 case IEMMODE_32BIT:
4126 {
4127 uint32_t u32Value;
4128 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4129 if (rcStrict == VINF_SUCCESS)
4130 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4131 break;
4132 }
4133
4134 case IEMMODE_64BIT:
4135 {
4136 uint64_t u64Value;
4137 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4138 if (rcStrict == VINF_SUCCESS)
4139 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4140 break;
4141 }
4142 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4143 }
4144
4145 /*
4146 * Commit the stack on success.
4147 */
4148 if (rcStrict == VINF_SUCCESS)
4149 {
4150 pCtx->rsp = TmpRsp.u;
4151 if (iSegReg == X86_SREG_SS)
4152 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4153 }
4154 return rcStrict;
4155}
4156
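/*
 * Illustrative sketch, not part of the build: whatever the operand size of
 * the pop above, only the low 16 bits of the popped value become the new
 * selector, and RSP is only committed once the segment load succeeded.
 */
#if 0
static uint16_t iemSketchPopSregValue(uint64_t uPopped)
{
    return (uint16_t)uPopped;     /* the upper bits of a 32/64-bit pop are discarded */
}
#endif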
4157
4158/**
4159 * Implements lgs, lfs, les, lds & lss.
4160 */
4161IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4162 uint16_t, uSel,
4163 uint64_t, offSeg,
4164 uint8_t, iSegReg,
4165 uint8_t, iGReg,
4166 IEMMODE, enmEffOpSize)
4167{
4168 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
4169 VBOXSTRICTRC rcStrict;
4170
4171 /*
4172 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4173 */
4174 /** @todo verify and test that mov, pop and lXs works the segment
4175 * register loading in the exact same way. */
4176 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4177 if (rcStrict == VINF_SUCCESS)
4178 {
4179 switch (enmEffOpSize)
4180 {
4181 case IEMMODE_16BIT:
4182 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4183 break;
4184 case IEMMODE_32BIT:
4185 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4186 break;
4187 case IEMMODE_64BIT:
4188 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4189 break;
4190 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4191 }
4192 }
4193
4194 return rcStrict;
4195}
4196
4197
4198/**
4199 * Helper for VERR, VERW, LAR, and LSL; loads the descriptor from memory.
4200 *
4201 * @retval VINF_SUCCESS on success.
4202 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4203 * @retval iemMemFetchSysU64 return value.
4204 *
4205 * @param pIemCpu The IEM state of the calling EMT.
4206 * @param uSel The selector value.
4207 * @param fAllowSysDesc Whether system descriptors are OK or not.
4208 * @param pDesc Where to return the descriptor on success.
4209 */
4210static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4211{
4212 pDesc->Long.au64[0] = 0;
4213 pDesc->Long.au64[1] = 0;
4214
4215 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4216 return VINF_IEM_SELECTOR_NOT_OK;
4217
4218 /* Within the table limits? */
4219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4220 RTGCPTR GCPtrBase;
4221 if (uSel & X86_SEL_LDT)
4222 {
4223 if ( !pCtx->ldtr.Attr.n.u1Present
4224 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4225 return VINF_IEM_SELECTOR_NOT_OK;
4226 GCPtrBase = pCtx->ldtr.u64Base;
4227 }
4228 else
4229 {
4230 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4231 return VINF_IEM_SELECTOR_NOT_OK;
4232 GCPtrBase = pCtx->gdtr.pGdt;
4233 }
4234
4235 /* Fetch the descriptor. */
4236 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4237 if (rcStrict != VINF_SUCCESS)
4238 return rcStrict;
4239 if (!pDesc->Legacy.Gen.u1DescType)
4240 {
4241 if (!fAllowSysDesc)
4242 return VINF_IEM_SELECTOR_NOT_OK;
4243 if (CPUMIsGuestInLongModeEx(pCtx))
4244 {
4245 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4246 if (rcStrict != VINF_SUCCESS)
4247 return rcStrict;
4248 }
4249
4250 }
4251
4252 return VINF_SUCCESS;
4253}
4254
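/*
 * Illustrative sketch, not part of the build: the table lookup done by the
 * helper above.  The limit check ORs in the RPL/TI bits so that the whole
 * 8 byte descriptor has to fit inside the table; names and parameters are
 * simplified stand-ins for the CPUMCTX fields used above.
 */
#if 0
static bool iemSketchCalcDescAddr(uint16_t uSel, uint64_t uGdtBase, uint32_t cbGdtLimit,
                                  uint64_t uLdtBase, uint32_t cbLdtLimit, bool fLdtPresent,
                                  uint64_t *puGCPtrDesc)
{
    if (uSel & X86_SEL_LDT)
    {
        if (!fLdtPresent || (uint32_t)(uSel | X86_SEL_RPL_LDT) > cbLdtLimit)
            return false;                                   /* outside the LDT */
        *puGCPtrDesc = uLdtBase + (uSel & X86_SEL_MASK);    /* index * 8 */
    }
    else
    {
        if ((uint32_t)(uSel | X86_SEL_RPL_LDT) > cbGdtLimit)
            return false;                                   /* outside the GDT */
        *puGCPtrDesc = uGdtBase + (uSel & X86_SEL_MASK);
    }
    return true;
}
#endif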
4255
4256/**
4257 * Implements verr (fWrite = false) and verw (fWrite = true).
4258 */
4259IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4260{
4261 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4262
4263 /** @todo figure whether the accessed bit is set or not. */
4264
4265 bool fAccessible = true;
4266 IEMSELDESC Desc;
4267 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4268 if (rcStrict == VINF_SUCCESS)
4269 {
4270 /* Check the descriptor, order doesn't matter much here. */
4271 if ( !Desc.Legacy.Gen.u1DescType
4272 || !Desc.Legacy.Gen.u1Present)
4273 fAccessible = false;
4274 else
4275 {
4276 if ( fWrite
4277 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4278 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4279 fAccessible = false;
4280
4281 /** @todo testcase for the conforming behavior. */
4282 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4283 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4284 {
4285 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4286 fAccessible = false;
4287 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4288 fAccessible = false;
4289 }
4290 }
4291
4292 }
4293 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4294 fAccessible = false;
4295 else
4296 return rcStrict;
4297
4298 /* commit */
4299 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
4300
4301 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4302 return VINF_SUCCESS;
4303}
4304
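/*
 * Illustrative sketch, not part of the build: the type test used by verr and
 * verw above.  verw demands a writable data segment, verr accepts anything
 * readable, i.e. everything but execute-only code; the helper is made up for
 * illustration.
 */
#if 0
static bool iemSketchVerxTypeOk(uint8_t u4Type, bool fWrite)
{
    if (fWrite)
        return (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) == X86_SEL_TYPE_WRITE;
    return (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) != X86_SEL_TYPE_CODE;
}
#endif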
4305
4306/**
4307 * Implements LAR and LSL with 64-bit operand size.
4308 *
4309 * @returns VINF_SUCCESS.
4310 * @param pu64Dst Pointer to the destination register.
4311 * @param uSel The selector to load details for.
4312 * @param pEFlags Pointer to the eflags register.
4313 * @param fIsLar true = LAR, false = LSL.
4314 */
4315IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4316{
4317 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4318
4319 /** @todo figure whether the accessed bit is set or not. */
4320
4321 bool fDescOk = true;
4322 IEMSELDESC Desc;
4323 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4324 if (rcStrict == VINF_SUCCESS)
4325 {
4326 /*
4327 * Check the descriptor type.
4328 */
4329 if (!Desc.Legacy.Gen.u1DescType)
4330 {
4331 if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx)))
4332 {
4333 if (Desc.Long.Gen.u5Zeros)
4334 fDescOk = false;
4335 else
4336 switch (Desc.Long.Gen.u4Type)
4337 {
4338 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4339 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4340 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4341 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4342 break;
4343 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4344 fDescOk = fIsLar;
4345 break;
4346 default:
4347 fDescOk = false;
4348 break;
4349 }
4350 }
4351 else
4352 {
4353 switch (Desc.Long.Gen.u4Type)
4354 {
4355 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4356 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4357 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4358 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4359 case X86_SEL_TYPE_SYS_LDT:
4360 break;
4361 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4362 case X86_SEL_TYPE_SYS_TASK_GATE:
4363 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4364 fDescOk = fIsLar;
4365 break;
4366 default:
4367 fDescOk = false;
4368 break;
4369 }
4370 }
4371 }
4372 if (fDescOk)
4373 {
4374 /*
4375 * Check the RPL/DPL/CPL interaction..
4376 */
4377 /** @todo testcase for the conforming behavior. */
4378 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4379 || !Desc.Legacy.Gen.u1DescType)
4380 {
4381 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4382 fDescOk = false;
4383 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4384 fDescOk = false;
4385 }
4386 }
4387
4388 if (fDescOk)
4389 {
4390 /*
4391 * All fine, start committing the result.
4392 */
4393 if (fIsLar)
4394 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4395 else
4396 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4397 }
4398
4399 }
4400 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4401 fDescOk = false;
4402 else
4403 return rcStrict;
4404
4405 /* commit flags value and advance rip. */
4406 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;
4407 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4408
4409 return VINF_SUCCESS;
4410}
4411
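/*
 * Illustrative sketch, not part of the build: what LAR and LSL actually hand
 * back once the descriptor passed the checks above.  LAR returns the access
 * rights bytes of the second descriptor dword (masked with 0x00ffff00), LSL
 * the byte granular limit; the helper form is illustrative only.
 */
#if 0
static uint64_t iemSketchLarLslResult(IEMSELDESC const *pDesc, bool fIsLar)
{
    if (fIsLar)
        return pDesc->Legacy.au32[1] & UINT32_C(0x00ffff00);    /* type, DPL, P, AVL, L, D/B, G */
    return X86DESC_LIMIT_G(&pDesc->Legacy);                     /* limit scaled by granularity */
}
#endif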
4412
4413/**
4414 * Implements LAR and LSL with 16-bit operand size.
4415 *
4416 * @returns VINF_SUCCESS.
4417 * @param pu16Dst Pointer to the destination register.
4418 * @param uSel The selector to load details for.
4419 * @param pEFlags Pointer to the eflags register.
4420 * @param fIsLar true = LAR, false = LSL.
4421 */
4422IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4423{
4424 uint64_t u64TmpDst = *pu16Dst;
4425 IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar);
4426 *pu16Dst = (uint16_t)u64TmpDst;
4427 return VINF_SUCCESS;
4428}
4429
4430
4431/**
4432 * Implements lgdt.
4433 *
4434 * @param iEffSeg The segment of the new gdtr contents
4435 * @param GCPtrEffSrc The address of the new gdtr contents.
4436 * @param enmEffOpSize The effective operand size.
4437 */
4438IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4439{
4440 if (pIemCpu->uCpl != 0)
4441 return iemRaiseGeneralProtectionFault0(pIemCpu);
4442 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4443
4444 /*
4445 * Fetch the limit and base address.
4446 */
4447 uint16_t cbLimit;
4448 RTGCPTR GCPtrBase;
4449 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4450 if (rcStrict == VINF_SUCCESS)
4451 {
4452 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4453 || X86_IS_CANONICAL(GCPtrBase))
4454 {
4455 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4456 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4457 else
4458 {
4459 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4460 pCtx->gdtr.cbGdt = cbLimit;
4461 pCtx->gdtr.pGdt = GCPtrBase;
4462 }
4463 if (rcStrict == VINF_SUCCESS)
4464 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4465 }
4466 else
4467 {
4468 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4469 return iemRaiseGeneralProtectionFault0(pIemCpu);
4470 }
4471 }
4472 return rcStrict;
4473}
4474
4475
4476/**
4477 * Implements sgdt.
4478 *
4479 * @param iEffSeg The segment where to store the gdtr content.
4480 * @param GCPtrEffDst The address where to store the gdtr content.
4481 */
4482IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4483{
4484 /*
4485 * Join paths with sidt.
4486 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4487 * you really must know.
4488 */
4489 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4490 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4491 if (rcStrict == VINF_SUCCESS)
4492 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4493 return rcStrict;
4494}
4495
4496
4497/**
4498 * Implements lidt.
4499 *
4500 * @param iEffSeg The segment of the new idtr contents
4501 * @param GCPtrEffSrc The address of the new idtr contents.
4502 * @param enmEffOpSize The effective operand size.
4503 */
4504IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4505{
4506 if (pIemCpu->uCpl != 0)
4507 return iemRaiseGeneralProtectionFault0(pIemCpu);
4508 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4509
4510 /*
4511 * Fetch the limit and base address.
4512 */
4513 uint16_t cbLimit;
4514 RTGCPTR GCPtrBase;
4515 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4516 if (rcStrict == VINF_SUCCESS)
4517 {
4518 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4519 || X86_IS_CANONICAL(GCPtrBase))
4520 {
4521 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4522 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4523 else
4524 {
4525 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4526 pCtx->idtr.cbIdt = cbLimit;
4527 pCtx->idtr.pIdt = GCPtrBase;
4528 }
4529 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4530 }
4531 else
4532 {
4533 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4534 return iemRaiseGeneralProtectionFault0(pIemCpu);
4535 }
4536 }
4537 return rcStrict;
4538}
4539
4540
4541/**
4542 * Implements sidt.
4543 *
4544 * @param iEffSeg The segment where to store the idtr content.
4545 * @param GCPtrEffDst The address where to store the idtr content.
4546 */
4547IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4548{
4549 /*
4550 * Join paths with sgdt.
4551 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4552 * you really must know.
4553 */
4554 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4555 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4556 if (rcStrict == VINF_SUCCESS)
4557 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4558 return rcStrict;
4559}
4560
4561
4562/**
4563 * Implements lldt.
4564 *
4565 * @param uNewLdt The new LDT selector value.
4566 */
4567IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4568{
4569 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4570
4571 /*
4572 * Check preconditions.
4573 */
4574 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4575 {
4576 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4577 return iemRaiseUndefinedOpcode(pIemCpu);
4578 }
4579 if (pIemCpu->uCpl != 0)
4580 {
4581 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
4582 return iemRaiseGeneralProtectionFault0(pIemCpu);
4583 }
4584 if (uNewLdt & X86_SEL_LDT)
4585 {
4586 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4587 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
4588 }
4589
4590 /*
4591 * Now, loading a NULL selector is easy.
4592 */
4593 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4594 {
4595 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4596 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4597 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
4598 else
4599 pCtx->ldtr.Sel = uNewLdt;
4600 pCtx->ldtr.ValidSel = uNewLdt;
4601 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4602 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4603 {
4604 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4605            pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4606 }
4607 else if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
4608 {
4609 /* AMD-V seems to leave the base and limit alone. */
4610 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4611 }
4612 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4613 {
4614 /* VT-x (Intel 3960x) seems to be doing the following. */
4615 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4616 pCtx->ldtr.u64Base = 0;
4617 pCtx->ldtr.u32Limit = UINT32_MAX;
4618 }
4619
4620 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4621 return VINF_SUCCESS;
4622 }
4623
4624 /*
4625 * Read the descriptor.
4626 */
4627 IEMSELDESC Desc;
4628 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4629 if (rcStrict != VINF_SUCCESS)
4630 return rcStrict;
4631
4632 /* Check GPs first. */
4633 if (Desc.Legacy.Gen.u1DescType)
4634 {
4635 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4636 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4637 }
4638 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4639 {
4640 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4641 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4642 }
4643 uint64_t u64Base;
4644 if (!IEM_IS_LONG_MODE(pIemCpu))
4645 u64Base = X86DESC_BASE(&Desc.Legacy);
4646 else
4647 {
4648 if (Desc.Long.Gen.u5Zeros)
4649 {
4650 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4651 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4652 }
4653
4654 u64Base = X86DESC64_BASE(&Desc.Long);
4655 if (!IEM_IS_CANONICAL(u64Base))
4656 {
4657 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4658 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4659 }
4660 }
4661
4662 /* NP */
4663 if (!Desc.Legacy.Gen.u1Present)
4664 {
4665 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4666 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
4667 }
4668
4669 /*
4670 * It checks out alright, update the registers.
4671 */
4672/** @todo check if the actual value is loaded or if the RPL is dropped */
4673 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4674 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
4675 else
4676 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4677 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4678 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4679 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4680 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4681 pCtx->ldtr.u64Base = u64Base;
4682
4683 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4684 return VINF_SUCCESS;
4685}
4686
4687
4688/**
4689 * Implements ltr.
4690 *
4691 * @param uNewTr The new task register selector value.
4692 */
4693IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4694{
4695 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4696
4697 /*
4698 * Check preconditions.
4699 */
4700 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4701 {
4702 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
4703 return iemRaiseUndefinedOpcode(pIemCpu);
4704 }
4705 if (pIemCpu->uCpl != 0)
4706 {
4707 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
4708 return iemRaiseGeneralProtectionFault0(pIemCpu);
4709 }
4710 if (uNewTr & X86_SEL_LDT)
4711 {
4712 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4713 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
4714 }
4715 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4716 {
4717 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4718 return iemRaiseGeneralProtectionFault0(pIemCpu);
4719 }
4720
4721 /*
4722 * Read the descriptor.
4723 */
4724 IEMSELDESC Desc;
4725 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4726 if (rcStrict != VINF_SUCCESS)
4727 return rcStrict;
4728
4729 /* Check GPs first. */
4730 if (Desc.Legacy.Gen.u1DescType)
4731 {
4732 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4733 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4734 }
4735 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4736 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4737 || IEM_IS_LONG_MODE(pIemCpu)) )
4738 {
4739 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4740 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4741 }
4742 uint64_t u64Base;
4743 if (!IEM_IS_LONG_MODE(pIemCpu))
4744 u64Base = X86DESC_BASE(&Desc.Legacy);
4745 else
4746 {
4747 if (Desc.Long.Gen.u5Zeros)
4748 {
4749 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4750 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4751 }
4752
4753 u64Base = X86DESC64_BASE(&Desc.Long);
4754 if (!IEM_IS_CANONICAL(u64Base))
4755 {
4756 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
4757 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4758 }
4759 }
4760
4761 /* NP */
4762 if (!Desc.Legacy.Gen.u1Present)
4763 {
4764 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
4765 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
4766 }
4767
4768 /*
4769 * Set it busy.
4770 * Note! Intel says this should lock down the whole descriptor, but we'll
4771     *       restrict ourselves to 32-bit for now due to lack of inline
4772 * assembly and such.
4773 */
4774 void *pvDesc;
4775 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
4776 if (rcStrict != VINF_SUCCESS)
4777 return rcStrict;
4778 switch ((uintptr_t)pvDesc & 3)
4779 {
4780 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
4781 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
4782 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
4783 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
4784 }
4785 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
4786 if (rcStrict != VINF_SUCCESS)
4787 return rcStrict;
4788 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4789
4790 /*
4791 * It checks out alright, update the registers.
4792 */
4793/** @todo check if the actual value is loaded or if the RPL is dropped */
4794 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4795 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
4796 else
4797 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
4798 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
4799 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4800 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4801 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4802 pCtx->tr.u64Base = u64Base;
4803
4804 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4805 return VINF_SUCCESS;
4806}
4807
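/*
 * Illustrative sketch, not part of the build: what the alignment switch in
 * ltr above accomplishes.  The TSS busy bit is descriptor bit 41 (bit 1 of
 * the type byte at offset 5); ASMAtomicBitSet wants a 32-bit aligned base, so
 * the code advances the byte pointer to the next 4 byte boundary and lowers
 * the bit index by 8 for every byte skipped.  The non-atomic equivalent:
 */
#if 0
static void iemSketchSetTssBusy(uint8_t *pbDesc /* the 8 byte TSS descriptor */)
{
    pbDesc[5] |= 0x02;    /* descriptor bit 41: X86_SEL_TYPE_SYS_TSS_BUSY_MASK in the type field */
}
#endif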
4808
4809/**
4810 * Implements mov GReg,CRx.
4811 *
4812 * @param iGReg The general register to store the CRx value in.
4813 * @param iCrReg The CRx register to read (valid).
4814 */
4815IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4816{
4817 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4818 if (pIemCpu->uCpl != 0)
4819 return iemRaiseGeneralProtectionFault0(pIemCpu);
4820 Assert(!pCtx->eflags.Bits.u1VM);
4821
4822 /* read it */
4823 uint64_t crX;
4824 switch (iCrReg)
4825 {
4826 case 0:
4827 crX = pCtx->cr0;
4828 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
4829 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
4830 break;
4831 case 2: crX = pCtx->cr2; break;
4832 case 3: crX = pCtx->cr3; break;
4833 case 4: crX = pCtx->cr4; break;
4834 case 8:
4835 {
4836 uint8_t uTpr;
4837 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
4838 if (RT_SUCCESS(rc))
4839 crX = uTpr >> 4;
4840 else
4841 crX = 0;
4842 break;
4843 }
4844 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4845 }
4846
4847 /* store it */
4848 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4849 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4850 else
4851 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4852
4853 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4854 return VINF_SUCCESS;
4855}
4856
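/*
 * Illustrative sketch, not part of the build: the CR8 read above maps the
 * APIC TPR onto CR8 by keeping only the priority class, i.e. the upper four
 * bits of the 8-bit TPR value.
 */
#if 0
static uint64_t iemSketchTprToCr8(uint8_t uTpr)
{
    return uTpr >> 4;     /* e.g. TPR 0x50 -> CR8 5 */
}
#endif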
4857
4858/**
4859 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
4860 *
4861 * @param iCrReg The CRx register to write (valid).
4862 * @param uNewCrX The new value.
4863 */
4864IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
4865{
4866 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4867 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4868 VBOXSTRICTRC rcStrict;
4869 int rc;
4870
4871 /*
4872 * Try store it.
4873 * Unfortunately, CPUM only does a tiny bit of the work.
4874 */
4875 switch (iCrReg)
4876 {
4877 case 0:
4878 {
4879 /*
4880 * Perform checks.
4881 */
4882 uint64_t const uOldCrX = pCtx->cr0;
4883 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4884 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4885 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
4886
4887 /* ET is hardcoded on 486 and later. */
4888 if (IEM_GET_TARGET_CPU(pIemCpu) > IEMTARGETCPU_486)
4889 uNewCrX |= X86_CR0_ET;
4890 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
4891 else if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_486)
4892 {
4893 uNewCrX &= fValid;
4894 uNewCrX |= X86_CR0_ET;
4895 }
4896 else
4897 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
4898
4899 /* Check for reserved bits. */
4900 if (uNewCrX & ~(uint64_t)fValid)
4901 {
4902 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
4903 return iemRaiseGeneralProtectionFault0(pIemCpu);
4904 }
4905
4906 /* Check for invalid combinations. */
4907 if ( (uNewCrX & X86_CR0_PG)
4908 && !(uNewCrX & X86_CR0_PE) )
4909 {
4910 Log(("Trying to set CR0.PG without CR0.PE\n"));
4911 return iemRaiseGeneralProtectionFault0(pIemCpu);
4912 }
4913
4914 if ( !(uNewCrX & X86_CR0_CD)
4915 && (uNewCrX & X86_CR0_NW) )
4916 {
4917 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4918 return iemRaiseGeneralProtectionFault0(pIemCpu);
4919 }
4920
4921 /* Long mode consistency checks. */
4922 if ( (uNewCrX & X86_CR0_PG)
4923 && !(uOldCrX & X86_CR0_PG)
4924 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4925 {
4926 if (!(pCtx->cr4 & X86_CR4_PAE))
4927 {
4928                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
4929 return iemRaiseGeneralProtectionFault0(pIemCpu);
4930 }
4931 if (pCtx->cs.Attr.n.u1Long)
4932 {
4933                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
4934 return iemRaiseGeneralProtectionFault0(pIemCpu);
4935 }
4936 }
4937
4938 /** @todo check reserved PDPTR bits as AMD states. */
4939
4940 /*
4941 * Change CR0.
4942 */
4943 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4944 CPUMSetGuestCR0(pVCpu, uNewCrX);
4945 else
4946 pCtx->cr0 = uNewCrX;
4947 Assert(pCtx->cr0 == uNewCrX);
4948
4949 /*
4950 * Change EFER.LMA if entering or leaving long mode.
4951 */
4952 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
4953 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4954 {
4955 uint64_t NewEFER = pCtx->msrEFER;
4956 if (uNewCrX & X86_CR0_PG)
4957 NewEFER |= MSR_K6_EFER_LMA;
4958 else
4959 NewEFER &= ~MSR_K6_EFER_LMA;
4960
4961 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4962 CPUMSetGuestEFER(pVCpu, NewEFER);
4963 else
4964 pCtx->msrEFER = NewEFER;
4965 Assert(pCtx->msrEFER == NewEFER);
4966 }
4967
4968 /*
4969 * Inform PGM.
4970 */
4971 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4972 {
4973 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
4974 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
4975 {
4976 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4977 AssertRCReturn(rc, rc);
4978 /* ignore informational status codes */
4979 }
4980 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4981 }
4982 else
4983 rcStrict = VINF_SUCCESS;
4984
4985#ifdef IN_RC
4986 /* Return to ring-3 for rescheduling if WP or AM changes. */
4987 if ( rcStrict == VINF_SUCCESS
4988 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
4989 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
4990 rcStrict = VINF_EM_RESCHEDULE;
4991#endif
4992 break;
4993 }
4994
4995 /*
4996 * CR2 can be changed without any restrictions.
4997 */
4998 case 2:
4999 pCtx->cr2 = uNewCrX;
5000 rcStrict = VINF_SUCCESS;
5001 break;
5002
5003 /*
5004 * CR3 is relatively simple, although AMD and Intel have different
5005         * accounts of how setting reserved bits is handled. We take intel's
5006 * word for the lower bits and AMD's for the high bits (63:52). The
5007 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5008 * on this.
5009 */
5010 /** @todo Testcase: Setting reserved bits in CR3, especially before
5011 * enabling paging. */
5012 case 3:
5013 {
5014 /* check / mask the value. */
5015 if (uNewCrX & UINT64_C(0xfff0000000000000))
5016 {
5017 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5018 return iemRaiseGeneralProtectionFault0(pIemCpu);
5019 }
5020
5021 uint64_t fValid;
5022 if ( (pCtx->cr4 & X86_CR4_PAE)
5023 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5024 fValid = UINT64_C(0x000fffffffffffff);
5025 else
5026 fValid = UINT64_C(0xffffffff);
5027 if (uNewCrX & ~fValid)
5028 {
5029 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5030 uNewCrX, uNewCrX & ~fValid));
5031 uNewCrX &= fValid;
5032 }
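 /* Illustrative summary of the policy above: bits 63:52 always fault (#GP), bits
    51:32 are dropped silently here unless long mode paging (PAE+LME) is active, and
    the low ignored bits (e.g. bits 2:0 and 11:5 when PCIDs are not used) are written
    through unchanged. */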
5033
5034 /** @todo If we're in PAE mode we should check the PDPTRs for
5035 * invalid bits. */
5036
5037 /* Make the change. */
5038 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5039 {
5040 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5041 AssertRCSuccessReturn(rc, rc);
5042 }
5043 else
5044 pCtx->cr3 = uNewCrX;
5045
5046 /* Inform PGM. */
5047 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5048 {
5049 if (pCtx->cr0 & X86_CR0_PG)
5050 {
5051 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5052 AssertRCReturn(rc, rc);
5053 /* ignore informational status codes */
5054 }
5055 }
5056 rcStrict = VINF_SUCCESS;
5057 break;
5058 }
5059
5060 /*
5061 * CR4 is a bit more tedious as there are bits which cannot be cleared
5062 * under some circumstances and such.
5063 */
5064 case 4:
5065 {
5066 uint64_t const uOldCrX = pCtx->cr4;
5067
5068 /** @todo Shouldn't this look at the guest CPUID bits to determine
5069 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5070 * should #GP(0). */
5071 /* reserved bits */
5072 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5073 | X86_CR4_TSD | X86_CR4_DE
5074 | X86_CR4_PSE | X86_CR4_PAE
5075 | X86_CR4_MCE | X86_CR4_PGE
5076 | X86_CR4_PCE | X86_CR4_OSFXSR
5077 | X86_CR4_OSXMMEEXCPT;
5078 //if (xxx)
5079 // fValid |= X86_CR4_VMXE;
5080 if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
5081 fValid |= X86_CR4_OSXSAVE;
5082 if (uNewCrX & ~(uint64_t)fValid)
5083 {
5084 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5085 return iemRaiseGeneralProtectionFault0(pIemCpu);
5086 }
5087
5088 /* long mode checks. */
5089 if ( (uOldCrX & X86_CR4_PAE)
5090 && !(uNewCrX & X86_CR4_PAE)
5091 && CPUMIsGuestInLongModeEx(pCtx) )
5092 {
5093 Log(("Trying to clear CR4.PAE while long mode is active\n"));
5094 return iemRaiseGeneralProtectionFault0(pIemCpu);
5095 }
5096
5097
5098 /*
5099 * Change it.
5100 */
5101 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5102 {
5103 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5104 AssertRCSuccessReturn(rc, rc);
5105 }
5106 else
5107 pCtx->cr4 = uNewCrX;
5108 Assert(pCtx->cr4 == uNewCrX);
5109
5110 /*
5111 * Notify SELM and PGM.
5112 */
5113 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5114 {
5115 /* SELM - VME may change things wrt to the TSS shadowing. */
5116 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5117 {
5118 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5119 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5120#ifdef VBOX_WITH_RAW_MODE
5121 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
5122 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5123#endif
5124 }
5125
5126 /* PGM - flushing and mode. */
5127 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5128 {
5129 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5130 AssertRCReturn(rc, rc);
5131 /* ignore informational status codes */
5132 }
5133 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5134 }
5135 else
5136 rcStrict = VINF_SUCCESS;
5137 break;
5138 }
5139
5140 /*
5141 * CR8 maps to the APIC TPR.
5142 */
5143 case 8:
5144 if (uNewCrX & ~(uint64_t)0xf)
5145 {
5146 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5147 return iemRaiseGeneralProtectionFault0(pIemCpu);
5148 }
5149
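 /* Illustrative note: CR8 holds the upper four bits of the APIC task priority
    register, so e.g. 'mov cr8, 9' in the guest results in a TPR of 0x90 being
    handed to the (virtual) local APIC below. */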
5150 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5151 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
5152 rcStrict = VINF_SUCCESS;
5153 break;
5154
5155 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5156 }
5157
5158 /*
5159 * Advance the RIP on success.
5160 */
5161 if (RT_SUCCESS(rcStrict))
5162 {
5163 if (rcStrict != VINF_SUCCESS)
5164 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5165 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5166 }
5167
5168 return rcStrict;
5169}
5170
5171
5172/**
5173 * Implements mov CRx,GReg.
5174 *
5175 * @param iCrReg The CRx register to write (valid).
5176 * @param iGReg The general register to load the CRx value from.
5177 */
5178IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5179{
5180 if (pIemCpu->uCpl != 0)
5181 return iemRaiseGeneralProtectionFault0(pIemCpu);
5182 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5183
5184 /*
5185 * Read the new value from the source register and call common worker.
5186 */
5187 uint64_t uNewCrX;
5188 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5189 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
5190 else
5191 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
5192 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
5193}
5194
5195
5196/**
5197 * Implements 'LMSW r/m16'
5198 *
5199 * @param u16NewMsw The new value.
5200 */
5201IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5202{
5203 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5204
5205 if (pIemCpu->uCpl != 0)
5206 return iemRaiseGeneralProtectionFault0(pIemCpu);
5207 Assert(!pCtx->eflags.Bits.u1VM);
5208
5209 /*
5210 * Compose the new CR0 value and call common worker.
5211 */
5212 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5213 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
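 /* Illustrative note: only MP, EM and TS were cleared above, so a CR0.PE that is
    already set survives even if u16NewMsw has PE=0; this matches the rule that LMSW
    can set but never clear PE. E.g. CR0=0x13 with u16NewMsw=0 yields uNewCr0=0x11
    (PE and ET kept, MP cleared). */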
5214 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5215}
5216
5217
5218/**
5219 * Implements 'CLTS'.
5220 */
5221IEM_CIMPL_DEF_0(iemCImpl_clts)
5222{
5223 if (pIemCpu->uCpl != 0)
5224 return iemRaiseGeneralProtectionFault0(pIemCpu);
5225
5226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5227 uint64_t uNewCr0 = pCtx->cr0;
5228 uNewCr0 &= ~X86_CR0_TS;
5229 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5230}
5231
5232
5233/**
5234 * Implements mov GReg,DRx.
5235 *
5236 * @param iGReg The general register to store the DRx value in.
5237 * @param iDrReg The DRx register to read (0-7).
5238 */
5239IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5240{
5241 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5242
5243 /*
5244 * Check preconditions.
5245 */
5246
5247 /* Raise GPs. */
5248 if (pIemCpu->uCpl != 0)
5249 return iemRaiseGeneralProtectionFault0(pIemCpu);
5250 Assert(!pCtx->eflags.Bits.u1VM);
5251
5252 if ( (iDrReg == 4 || iDrReg == 5)
5253 && (pCtx->cr4 & X86_CR4_DE) )
5254 {
5255 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5256 return iemRaiseGeneralProtectionFault0(pIemCpu);
5257 }
5258
5259 /* Raise #DB if general access detect is enabled. */
5260 if (pCtx->dr[7] & X86_DR7_GD)
5261 {
5262 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5263 return iemRaiseDebugException(pIemCpu);
5264 }
5265
5266 /*
5267 * Read the debug register and store it in the specified general register.
5268 */
5269 uint64_t drX;
5270 switch (iDrReg)
5271 {
5272 case 0: drX = pCtx->dr[0]; break;
5273 case 1: drX = pCtx->dr[1]; break;
5274 case 2: drX = pCtx->dr[2]; break;
5275 case 3: drX = pCtx->dr[3]; break;
5276 case 6:
5277 case 4:
5278 drX = pCtx->dr[6];
5279 drX |= X86_DR6_RA1_MASK;
5280 drX &= ~X86_DR6_RAZ_MASK;
5281 break;
5282 case 7:
5283 case 5:
5284 drX = pCtx->dr[7];
5285 drX |= X86_DR7_RA1_MASK;
5286 drX &= ~X86_DR7_RAZ_MASK;
5287 break;
5288 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5289 }
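 /* Illustrative note: with CR4.DE clear, reads of DR4/DR5 alias to DR6/DR7 (the
    shared cases above), and the fixed bits are normalized on the way out so the
    guest always sees the architecturally defined must-be-one bits set regardless of
    what was last written. */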
5290
5291 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5292 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
5293 else
5294 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
5295
5296 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5297 return VINF_SUCCESS;
5298}
5299
5300
5301/**
5302 * Implements mov DRx,GReg.
5303 *
5304 * @param iDrReg The DRx register to write (valid).
5305 * @param iGReg The general register to load the DRx value from.
5306 */
5307IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5308{
5309 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5310
5311 /*
5312 * Check preconditions.
5313 */
5314 if (pIemCpu->uCpl != 0)
5315 return iemRaiseGeneralProtectionFault0(pIemCpu);
5316 Assert(!pCtx->eflags.Bits.u1VM);
5317
5318 if (iDrReg == 4 || iDrReg == 5)
5319 {
5320 if (pCtx->cr4 & X86_CR4_DE)
5321 {
5322 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5323 return iemRaiseGeneralProtectionFault0(pIemCpu);
5324 }
5325 iDrReg += 2;
5326 }
5327
5328 /* Raise #DB if general access detect is enabled. */
5329 /** @todo Is the \#DB for DR7.GD raised before the \#GP for reserved high
5330 * bits in DR7/DR6? */
5331 if (pCtx->dr[7] & X86_DR7_GD)
5332 {
5333 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5334 return iemRaiseDebugException(pIemCpu);
5335 }
5336
5337 /*
5338 * Read the new value from the source register.
5339 */
5340 uint64_t uNewDrX;
5341 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5342 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
5343 else
5344 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
5345
5346 /*
5347 * Adjust it.
5348 */
5349 switch (iDrReg)
5350 {
5351 case 0:
5352 case 1:
5353 case 2:
5354 case 3:
5355 /* nothing to adjust */
5356 break;
5357
5358 case 6:
5359 if (uNewDrX & X86_DR6_MBZ_MASK)
5360 {
5361 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5362 return iemRaiseGeneralProtectionFault0(pIemCpu);
5363 }
5364 uNewDrX |= X86_DR6_RA1_MASK;
5365 uNewDrX &= ~X86_DR6_RAZ_MASK;
5366 break;
5367
5368 case 7:
5369 if (uNewDrX & X86_DR7_MBZ_MASK)
5370 {
5371 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5372 return iemRaiseGeneralProtectionFault0(pIemCpu);
5373 }
5374 uNewDrX |= X86_DR7_RA1_MASK;
5375 uNewDrX &= ~X86_DR7_RAZ_MASK;
5376 break;
5377
5378 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5379 }
5380
5381 /*
5382 * Do the actual setting.
5383 */
5384 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5385 {
5386 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
5387 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5388 }
5389 else
5390 pCtx->dr[iDrReg] = uNewDrX;
5391
5392 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5393 return VINF_SUCCESS;
5394}
5395
5396
5397/**
5398 * Implements 'INVLPG m'.
5399 *
5400 * @param GCPtrPage The effective address of the page to invalidate.
5401 * @remarks Updates the RIP.
5402 */
5403IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5404{
5405 /* ring-0 only. */
5406 if (pIemCpu->uCpl != 0)
5407 return iemRaiseGeneralProtectionFault0(pIemCpu);
5408 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5409
5410 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
5411 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5412
5413 if (rc == VINF_SUCCESS)
5414 return VINF_SUCCESS;
5415 if (rc == VINF_PGM_SYNC_CR3)
5416 return iemSetPassUpStatus(pIemCpu, rc);
5417
5418 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5419 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5420 return rc;
5421}
5422
5423
5424/**
5425 * Implements RDTSC.
5426 */
5427IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5428{
5429 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5430
5431 /*
5432 * Check preconditions.
5433 */
5434 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
5435 return iemRaiseUndefinedOpcode(pIemCpu);
5436
5437 if ( (pCtx->cr4 & X86_CR4_TSD)
5438 && pIemCpu->uCpl != 0)
5439 {
5440 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
5441 return iemRaiseGeneralProtectionFault0(pIemCpu);
5442 }
5443
5444 /*
5445 * Do the job.
5446 */
5447 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
5448 pCtx->rax = (uint32_t)uTicks;
5449 pCtx->rdx = uTicks >> 32;
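 /* Illustrative example: uTicks=0x0000001234567890 is returned as EDX:EAX =
    00000012:34567890; assigning 32-bit values to rax/rdx also clears the upper
    register halves, just like a 32-bit register write in 64-bit mode. */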
5450#ifdef IEM_VERIFICATION_MODE_FULL
5451 pIemCpu->fIgnoreRaxRdx = true;
5452#endif
5453
5454 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5455 return VINF_SUCCESS;
5456}
5457
5458
5459/**
5460 * Implements RDMSR.
5461 */
5462IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5463{
5464 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5465
5466 /*
5467 * Check preconditions.
5468 */
5469 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5470 return iemRaiseUndefinedOpcode(pIemCpu);
5471 if (pIemCpu->uCpl != 0)
5472 return iemRaiseGeneralProtectionFault0(pIemCpu);
5473
5474 /*
5475 * Do the job.
5476 */
5477 RTUINT64U uValue;
5478 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
5479 if (rcStrict == VINF_SUCCESS)
5480 {
5481 pCtx->rax = uValue.s.Lo;
5482 pCtx->rdx = uValue.s.Hi;
5483
5484 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5485 return VINF_SUCCESS;
5486 }
5487
5488#ifndef IN_RING3
5489 /* Deferred to ring-3. */
5490 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5491 {
5492 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5493 return rcStrict;
5494 }
5495#else /* IN_RING3 */
5496 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5497 static uint32_t s_cTimes = 0;
5498 if (s_cTimes++ < 10)
5499 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5500 else
5501#endif
5502 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5503 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5504 return iemRaiseGeneralProtectionFault0(pIemCpu);
5505}
5506
5507
5508/**
5509 * Implements WRMSR.
5510 */
5511IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5512{
5513 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5514
5515 /*
5516 * Check preconditions.
5517 */
5518 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5519 return iemRaiseUndefinedOpcode(pIemCpu);
5520 if (pIemCpu->uCpl != 0)
5521 return iemRaiseGeneralProtectionFault0(pIemCpu);
5522
5523 /*
5524 * Do the job.
5525 */
5526 RTUINT64U uValue;
5527 uValue.s.Lo = pCtx->eax;
5528 uValue.s.Hi = pCtx->edx;
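 /* Illustrative example: the 64-bit payload of 'wrmsr' is EDX:EAX, so with
    ECX=0xC0000080 (EFER), EDX=0 and EAX=0x00000901 the value written below is
    0x0000000000000901 (SCE+LME+NXE). */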
5529
5530 VBOXSTRICTRC rcStrict;
5531 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5532 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5533 else
5534 {
5535#ifdef IN_RING3
5536 CPUMCTX CtxTmp = *pCtx;
5537 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5538 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5539 *pCtx = *pCtx2;
5540 *pCtx2 = CtxTmp;
5541#else
5542 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5543#endif
5544 }
5545 if (rcStrict == VINF_SUCCESS)
5546 {
5547 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5548 return VINF_SUCCESS;
5549 }
5550
5551#ifndef IN_RING3
5552 /* Deferred to ring-3. */
5553 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
5554 {
5555 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
5556 return rcStrict;
5557 }
5558#else /* IN_RING3 */
5559 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5560 static uint32_t s_cTimes = 0;
5561 if (s_cTimes++ < 10)
5562 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5563 else
5564#endif
5565 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5566 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5567 return iemRaiseGeneralProtectionFault0(pIemCpu);
5568}
5569
5570
5571/**
5572 * Implements 'IN eAX, port'.
5573 *
5574 * @param u16Port The source port.
5575 * @param cbReg The register size.
5576 */
5577IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5578{
5579 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5580
5581 /*
5582 * CPL check
5583 */
5584 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5585 if (rcStrict != VINF_SUCCESS)
5586 return rcStrict;
5587
5588 /*
5589 * Perform the I/O.
5590 */
5591 uint32_t u32Value;
5592 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5593 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
5594 else
5595 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
5596 if (IOM_SUCCESS(rcStrict))
5597 {
5598 switch (cbReg)
5599 {
5600 case 1: pCtx->al = (uint8_t)u32Value; break;
5601 case 2: pCtx->ax = (uint16_t)u32Value; break;
5602 case 4: pCtx->rax = u32Value; break;
5603 default: AssertFailedReturn(VERR_IEM_IPE_3);
5604 }
5605 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5606 pIemCpu->cPotentialExits++;
5607 if (rcStrict != VINF_SUCCESS)
5608 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5609 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5610
5611 /*
5612 * Check for I/O breakpoints.
5613 */
5614 uint32_t const uDr7 = pCtx->dr[7];
5615 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5616 && X86_DR7_ANY_RW_IO(uDr7)
5617 && (pCtx->cr4 & X86_CR4_DE))
5618 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5619 {
5620 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5621 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5622 rcStrict = iemRaiseDebugException(pIemCpu);
5623 }
5624 }
5625
5626 return rcStrict;
5627}
5628
5629
5630/**
5631 * Implements 'IN eAX, DX'.
5632 *
5633 * @param cbReg The register size.
5634 */
5635IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5636{
5637 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5638}
5639
5640
5641/**
5642 * Implements 'OUT port, eAX'.
5643 *
5644 * @param u16Port The destination port.
5645 * @param cbReg The register size.
5646 */
5647IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5648{
5649 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5650
5651 /*
5652 * CPL check
5653 */
5654 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5655 if (rcStrict != VINF_SUCCESS)
5656 return rcStrict;
5657
5658 /*
5659 * Perform the I/O.
5660 */
5661 uint32_t u32Value;
5662 switch (cbReg)
5663 {
5664 case 1: u32Value = pCtx->al; break;
5665 case 2: u32Value = pCtx->ax; break;
5666 case 4: u32Value = pCtx->eax; break;
5667 default: AssertFailedReturn(VERR_IEM_IPE_4);
5668 }
5669 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5670 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
5671 else
5672 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
5673 if (IOM_SUCCESS(rcStrict))
5674 {
5675 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5676 pIemCpu->cPotentialExits++;
5677 if (rcStrict != VINF_SUCCESS)
5678 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5679 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5680
5681 /*
5682 * Check for I/O breakpoints.
5683 */
5684 uint32_t const uDr7 = pCtx->dr[7];
5685 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5686 && X86_DR7_ANY_RW_IO(uDr7)
5687 && (pCtx->cr4 & X86_CR4_DE))
5688 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5689 {
5690 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5691 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5692 rcStrict = iemRaiseDebugException(pIemCpu);
5693 }
5694 }
5695 return rcStrict;
5696}
5697
5698
5699/**
5700 * Implements 'OUT DX, eAX'.
5701 *
5702 * @param cbReg The register size.
5703 */
5704IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5705{
5706 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5707}
5708
5709
5710/**
5711 * Implements 'CLI'.
5712 */
5713IEM_CIMPL_DEF_0(iemCImpl_cli)
5714{
5715 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5716 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5717 uint32_t const fEflOld = fEfl;
5718 if (pCtx->cr0 & X86_CR0_PE)
5719 {
5720 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5721 if (!(fEfl & X86_EFL_VM))
5722 {
5723 if (pIemCpu->uCpl <= uIopl)
5724 fEfl &= ~X86_EFL_IF;
5725 else if ( pIemCpu->uCpl == 3
5726 && (pCtx->cr4 & X86_CR4_PVI) )
5727 fEfl &= ~X86_EFL_VIF;
5728 else
5729 return iemRaiseGeneralProtectionFault0(pIemCpu);
5730 }
5731 /* V8086 */
5732 else if (uIopl == 3)
5733 fEfl &= ~X86_EFL_IF;
5734 else if ( uIopl < 3
5735 && (pCtx->cr4 & X86_CR4_VME) )
5736 fEfl &= ~X86_EFL_VIF;
5737 else
5738 return iemRaiseGeneralProtectionFault0(pIemCpu);
5739 }
5740 /* real mode */
5741 else
5742 fEfl &= ~X86_EFL_IF;
5743
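 /* Illustrative example of the protected mode rules above: with IOPL=0 and CPL=3,
    CLI raises #GP(0) unless CR4.PVI is set, in which case only the virtual interrupt
    flag (VIF) is cleared instead of IF. */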
5744 /* Commit. */
5745 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5746 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5747 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
5748 return VINF_SUCCESS;
5749}
5750
5751
5752/**
5753 * Implements 'STI'.
5754 */
5755IEM_CIMPL_DEF_0(iemCImpl_sti)
5756{
5757 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5758 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5759 uint32_t const fEflOld = fEfl;
5760
5761 if (pCtx->cr0 & X86_CR0_PE)
5762 {
5763 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5764 if (!(fEfl & X86_EFL_VM))
5765 {
5766 if (pIemCpu->uCpl <= uIopl)
5767 fEfl |= X86_EFL_IF;
5768 else if ( pIemCpu->uCpl == 3
5769 && (pCtx->cr4 & X86_CR4_PVI)
5770 && !(fEfl & X86_EFL_VIP) )
5771 fEfl |= X86_EFL_VIF;
5772 else
5773 return iemRaiseGeneralProtectionFault0(pIemCpu);
5774 }
5775 /* V8086 */
5776 else if (uIopl == 3)
5777 fEfl |= X86_EFL_IF;
5778 else if ( uIopl < 3
5779 && (pCtx->cr4 & X86_CR4_VME)
5780 && !(fEfl & X86_EFL_VIP) )
5781 fEfl |= X86_EFL_VIF;
5782 else
5783 return iemRaiseGeneralProtectionFault0(pIemCpu);
5784 }
5785 /* real mode */
5786 else
5787 fEfl |= X86_EFL_IF;
5788
5789 /* Commit. */
5790 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5791 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5792 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
5793 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
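 /* Illustrative note: the inhibit-interrupts PC set above models the one
    instruction "STI shadow"; e.g. in the common 'sti; hlt' sequence a pending
    interrupt is only delivered after the HLT has executed. */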
5794 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
5795 return VINF_SUCCESS;
5796}
5797
5798
5799/**
5800 * Implements 'HLT'.
5801 */
5802IEM_CIMPL_DEF_0(iemCImpl_hlt)
5803{
5804 if (pIemCpu->uCpl != 0)
5805 return iemRaiseGeneralProtectionFault0(pIemCpu);
5806 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5807 return VINF_EM_HALT;
5808}
5809
5810
5811/**
5812 * Implements 'MONITOR'.
5813 */
5814IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
5815{
5816 /*
5817 * Permission checks.
5818 */
5819 if (pIemCpu->uCpl != 0)
5820 {
5821 Log2(("monitor: CPL != 0\n"));
5822 return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
5823 }
5824 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5825 {
5826 Log2(("monitor: Not in CPUID\n"));
5827 return iemRaiseUndefinedOpcode(pIemCpu);
5828 }
5829
5830 /*
5831 * Gather the operands and validate them.
5832 */
5833 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5834 RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
5835 uint32_t uEcx = pCtx->ecx;
5836 uint32_t uEdx = pCtx->edx;
5837/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
5838 * \#GP first. */
5839 if (uEcx != 0)
5840 {
5841 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
5842 return iemRaiseGeneralProtectionFault0(pIemCpu);
5843 }
5844
5845 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
5846 if (rcStrict != VINF_SUCCESS)
5847 return rcStrict;
5848
5849 RTGCPHYS GCPhysMem;
5850 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
5851 if (rcStrict != VINF_SUCCESS)
5852 return rcStrict;
5853
5854 /*
5855 * Call EM to prepare the monitor/wait.
5856 */
5857 rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
5858 Assert(rcStrict == VINF_SUCCESS);
5859
5860 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5861 return rcStrict;
5862}
5863
5864
5865/**
5866 * Implements 'MWAIT'.
5867 */
5868IEM_CIMPL_DEF_0(iemCImpl_mwait)
5869{
5870 /*
5871 * Permission checks.
5872 */
5873 if (pIemCpu->uCpl != 0)
5874 {
5875 Log2(("mwait: CPL != 0\n"));
5876 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
5877 * EFLAGS.VM then.) */
5878 return iemRaiseUndefinedOpcode(pIemCpu);
5879 }
5880 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5881 {
5882 Log2(("mwait: Not in CPUID\n"));
5883 return iemRaiseUndefinedOpcode(pIemCpu);
5884 }
5885
5886 /*
5887 * Gather the operands and validate them.
5888 */
5889 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5890 uint32_t uEax = pCtx->eax;
5891 uint32_t uEcx = pCtx->ecx;
5892 if (uEcx != 0)
5893 {
5894 /* Only supported extension is break on IRQ when IF=0. */
5895 if (uEcx > 1)
5896 {
5897 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
5898 return iemRaiseGeneralProtectionFault0(pIemCpu);
5899 }
5900 uint32_t fMWaitFeatures = 0;
5901 uint32_t uIgnore = 0;
5902 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
5903 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5904 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5905 {
5906 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
5907 return iemRaiseGeneralProtectionFault0(pIemCpu);
5908 }
5909 }
5910
5911 /*
5912 * Call EM to perform the monitor/wait.
5913 */
5914 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);
5915
5916 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5917 return rcStrict;
5918}
5919
5920
5921/**
5922 * Implements 'SWAPGS'.
5923 */
5924IEM_CIMPL_DEF_0(iemCImpl_swapgs)
5925{
5926 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
5927
5928 /*
5929 * Permission checks.
5930 */
5931 if (pIemCpu->uCpl != 0)
5932 {
5933 Log2(("swapgs: CPL != 0\n"));
5934 return iemRaiseUndefinedOpcode(pIemCpu);
5935 }
5936
5937 /*
5938 * Do the job.
5939 */
5940 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5941 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
5942 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
5943 pCtx->gs.u64Base = uOtherGsBase;
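 /* Illustrative note: this is the architectural SWAPGS exchange of GS.base with the
    IA32_KERNEL_GS_BASE MSR (0xC0000102); 64-bit kernels typically execute it on
    syscall/interrupt entry to switch from the user to the kernel GS base. */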
5944
5945 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5946 return VINF_SUCCESS;
5947}
5948
5949
5950/**
5951 * Implements 'CPUID'.
5952 */
5953IEM_CIMPL_DEF_0(iemCImpl_cpuid)
5954{
5955 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5956
5957 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
5958 pCtx->rax &= UINT32_C(0xffffffff);
5959 pCtx->rbx &= UINT32_C(0xffffffff);
5960 pCtx->rcx &= UINT32_C(0xffffffff);
5961 pCtx->rdx &= UINT32_C(0xffffffff);
5962
5963 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5964 return VINF_SUCCESS;
5965}
5966
5967
5968/**
5969 * Implements 'AAD'.
5970 *
5971 * @param bImm The immediate operand.
5972 */
5973IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
5974{
5975 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5976
5977 uint16_t const ax = pCtx->ax;
5978 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
5979 pCtx->ax = al;
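 /* Illustrative example: with the usual bImm=10 and AX=0x0207 (unpacked BCD 27),
    AL becomes 7 + 2*10 = 0x1b and AH is cleared, so AX ends up as 0x001b. */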
5980 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5981 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5982 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5983
5984 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5985 return VINF_SUCCESS;
5986}
5987
5988
5989/**
5990 * Implements 'AAM'.
5991 *
5992 * @param bImm The immediate operand. Cannot be 0.
5993 */
5994IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
5995{
5996 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5997 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
5998
5999 uint16_t const ax = pCtx->ax;
6000 uint8_t const al = (uint8_t)ax % bImm;
6001 uint8_t const ah = (uint8_t)ax / bImm;
6002 pCtx->ax = (ah << 8) + al;
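 /* Illustrative example: with the usual bImm=10 and AL=0x3f (63), AH becomes
    63/10 = 6 and AL becomes 63%10 = 3, so AX ends up as 0x0603 (unpacked BCD 63). */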
6003 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
6004 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6005 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6006
6007 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6008 return VINF_SUCCESS;
6009}
6010
6011
6012/**
6013 * Implements 'DAA'.
6014 */
6015IEM_CIMPL_DEF_0(iemCImpl_daa)
6016{
6017 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6018
6019 uint8_t const al = pCtx->al;
6020 bool const fCarry = pCtx->eflags.Bits.u1CF;
6021
6022 if ( pCtx->eflags.Bits.u1AF
6023 || (al & 0xf) >= 10)
6024 {
6025 pCtx->al = al + 6;
6026 pCtx->eflags.Bits.u1AF = 1;
6027 }
6028 else
6029 pCtx->eflags.Bits.u1AF = 0;
6030
6031 if (al >= 0x9a || fCarry)
6032 {
6033 pCtx->al += 0x60;
6034 pCtx->eflags.Bits.u1CF = 1;
6035 }
6036 else
6037 pCtx->eflags.Bits.u1CF = 0;
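 /* Illustrative example: for AL=0x9c (e.g. from 0x47+0x55) with AF=CF=0, the low
    nibble adjustment yields 0xa2 and the high adjustment 0x02 with CF=1, i.e. the
    packed BCD result "02" plus carry for decimal 47+55=102. */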
6038
6039 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6040 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6041 return VINF_SUCCESS;
6042}
6043
6044
6045/**
6046 * Implements 'DAS'.
6047 */
6048IEM_CIMPL_DEF_0(iemCImpl_das)
6049{
6050 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6051
6052 uint8_t const uInputAL = pCtx->al;
6053 bool const fCarry = pCtx->eflags.Bits.u1CF;
6054
6055 if ( pCtx->eflags.Bits.u1AF
6056 || (uInputAL & 0xf) >= 10)
6057 {
6058 pCtx->eflags.Bits.u1AF = 1;
6059 if (uInputAL < 6)
6060 pCtx->eflags.Bits.u1CF = 1;
6061 pCtx->al = uInputAL - 6;
6062 }
6063 else
6064 {
6065 pCtx->eflags.Bits.u1AF = 0;
6066 pCtx->eflags.Bits.u1CF = 0;
6067 }
6068
6069 if (uInputAL >= 0x9a || fCarry)
6070 {
6071 pCtx->al -= 0x60;
6072 pCtx->eflags.Bits.u1CF = 1;
6073 }
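 /* Illustrative example: for AL=0x1e (e.g. from 0x47-0x29) with AF=CF=0, only the
    low nibble adjustment applies, giving AL=0x18 and CF=0, i.e. the packed BCD
    result for decimal 47-29=18. */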
6074
6075 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6076 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6077 return VINF_SUCCESS;
6078}
6079
6080
6081
6082
6083/*
6084 * Instantiate the various string operation combinations.
6085 */
6086#define OP_SIZE 8
6087#define ADDR_SIZE 16
6088#include "IEMAllCImplStrInstr.cpp.h"
6089#define OP_SIZE 8
6090#define ADDR_SIZE 32
6091#include "IEMAllCImplStrInstr.cpp.h"
6092#define OP_SIZE 8
6093#define ADDR_SIZE 64
6094#include "IEMAllCImplStrInstr.cpp.h"
6095
6096#define OP_SIZE 16
6097#define ADDR_SIZE 16
6098#include "IEMAllCImplStrInstr.cpp.h"
6099#define OP_SIZE 16
6100#define ADDR_SIZE 32
6101#include "IEMAllCImplStrInstr.cpp.h"
6102#define OP_SIZE 16
6103#define ADDR_SIZE 64
6104#include "IEMAllCImplStrInstr.cpp.h"
6105
6106#define OP_SIZE 32
6107#define ADDR_SIZE 16
6108#include "IEMAllCImplStrInstr.cpp.h"
6109#define OP_SIZE 32
6110#define ADDR_SIZE 32
6111#include "IEMAllCImplStrInstr.cpp.h"
6112#define OP_SIZE 32
6113#define ADDR_SIZE 64
6114#include "IEMAllCImplStrInstr.cpp.h"
6115
6116#define OP_SIZE 64
6117#define ADDR_SIZE 32
6118#include "IEMAllCImplStrInstr.cpp.h"
6119#define OP_SIZE 64
6120#define ADDR_SIZE 64
6121#include "IEMAllCImplStrInstr.cpp.h"
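/* Illustrative note: each OP_SIZE/ADDR_SIZE pair above expands the template in
   IEMAllCImplStrInstr.cpp.h into one set of string instruction workers for that
   operand/address size combination; the 16-bit address size is not instantiated
   for 64-bit operands since that encoding does not exist. */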
6122
6123
6124/**
6125 * Implements 'XGETBV'.
6126 */
6127IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
6128{
6129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6130 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6131 {
6132 uint32_t uEcx = pCtx->ecx;
6133 switch (uEcx)
6134 {
6135 case 0:
6136 break;
6137
6138 case 1: /** @todo Implement XCR1 support. */
6139 default:
6140 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
6141 return iemRaiseGeneralProtectionFault0(pIemCpu);
6142
6143 }
6144 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
6145 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
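 /* Illustrative example: with XCR0=0x0000000000000007 (x87, SSE and AVX state
    enabled), 'xgetbv' with ECX=0 returns EAX=0x00000007 and EDX=0x00000000. */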
6146
6147 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6148 return VINF_SUCCESS;
6149 }
6150 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
6151 return iemRaiseUndefinedOpcode(pIemCpu);
6152}
6153
6154
6155/**
6156 * Implements 'XSETBV'.
6157 */
6158IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
6159{
6160 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6161 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6162 {
6163 if (pIemCpu->uCpl == 0)
6164 {
6165 uint32_t uEcx = pCtx->ecx;
6166 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
6167 switch (uEcx)
6168 {
6169 case 0:
6170 {
6171 int rc = CPUMSetGuestXcr0(IEMCPU_TO_VMCPU(pIemCpu), uNewValue);
6172 if (rc == VINF_SUCCESS)
6173 break;
6174 Assert(rc == VERR_CPUM_RAISE_GP_0);
6175 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6176 return iemRaiseGeneralProtectionFault0(pIemCpu);
6177 }
6178
6179 case 1: /** @todo Implement XCR1 support. */
6180 default:
6181 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6182 return iemRaiseGeneralProtectionFault0(pIemCpu);
6183
6184 }
6185
6186 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6187 return VINF_SUCCESS;
6188 }
6189
6190 Log(("xsetbv cpl=%u -> #GP(0)\n", pIemCpu->uCpl));
6191 return iemRaiseGeneralProtectionFault0(pIemCpu);
6192 }
6193 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
6194 return iemRaiseUndefinedOpcode(pIemCpu);
6195}
6196
6197
6198
6199/**
6200 * Implements 'FINIT' and 'FNINIT'.
6201 *
6202 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
6203 * not.
6204 */
6205IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
6206{
6207 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6208
6209 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6210 return iemRaiseDeviceNotAvailable(pIemCpu);
6211
6212 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
6213 if (fCheckXcpts && TODO )
6214 return iemRaiseMathFault(pIemCpu);
6215 */
6216
6217 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
6218 pXState->x87.FCW = 0x37f;
6219 pXState->x87.FSW = 0;
6220 pXState->x87.FTW = 0x00; /* 0 - empty. */
6221 pXState->x87.FPUDP = 0;
6222 pXState->x87.DS = 0; //??
6223 pXState->x87.Rsrvd2= 0;
6224 pXState->x87.FPUIP = 0;
6225 pXState->x87.CS = 0; //??
6226 pXState->x87.Rsrvd1= 0;
6227 pXState->x87.FOP = 0;
6228
6229 iemHlpUsedFpu(pIemCpu);
6230 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6231 return VINF_SUCCESS;
6232}
6233
6234
6235/**
6236 * Implements 'FXSAVE'.
6237 *
6238 * @param iEffSeg The effective segment.
6239 * @param GCPtrEff The address of the image.
6240 * @param enmEffOpSize The operand size (only REX.W really matters).
6241 */
6242IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6243{
6244 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6245
6246 /*
6247 * Raise exceptions.
6248 */
6249 if (pCtx->cr0 & X86_CR0_EM)
6250 return iemRaiseUndefinedOpcode(pIemCpu);
6251 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6252 return iemRaiseDeviceNotAvailable(pIemCpu);
6253 if (GCPtrEff & 15)
6254 {
6255 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6256 * all/any misalignment sizes; Intel says it's an implementation detail. */
6257 if ( (pCtx->cr0 & X86_CR0_AM)
6258 && pCtx->eflags.Bits.u1AC
6259 && pIemCpu->uCpl == 3)
6260 return iemRaiseAlignmentCheckException(pIemCpu);
6261 return iemRaiseGeneralProtectionFault0(pIemCpu);
6262 }
6263
6264 /*
6265 * Access the memory.
6266 */
6267 void *pvMem512;
6268 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6269 if (rcStrict != VINF_SUCCESS)
6270 return rcStrict;
6271 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
6272 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
6273
6274 /*
6275 * Store the registers.
6276 */
6277 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6278 * specific whether MXCSR and XMM0-XMM7 are saved. */
6279
6280 /* common for all formats */
6281 pDst->FCW = pSrc->FCW;
6282 pDst->FSW = pSrc->FSW;
6283 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6284 pDst->FOP = pSrc->FOP;
6285 pDst->MXCSR = pSrc->MXCSR;
6286 pDst->MXCSR_MASK = pSrc->MXCSR_MASK;
6287 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
6288 {
6289 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
6290 * them for now... */
6291 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6292 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6293 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6294 pDst->aRegs[i].au32[3] = 0;
6295 }
6296
6297 /* FPU IP, CS, DP and DS. */
6298 pDst->FPUIP = pSrc->FPUIP;
6299 pDst->CS = pSrc->CS;
6300 pDst->FPUDP = pSrc->FPUDP;
6301 pDst->DS = pSrc->DS;
6302 if (enmEffOpSize == IEMMODE_64BIT)
6303 {
6304 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
6305 pDst->Rsrvd1 = pSrc->Rsrvd1;
6306 pDst->Rsrvd2 = pSrc->Rsrvd2;
6307 pDst->au32RsrvdForSoftware[0] = 0;
6308 }
6309 else
6310 {
6311 pDst->Rsrvd1 = 0;
6312 pDst->Rsrvd2 = 0;
6313 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
6314 }
6315
6316 /* XMM registers. */
6317 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6318 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6319 || pIemCpu->uCpl != 0)
6320 {
6321 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6322 for (uint32_t i = 0; i < cXmmRegs; i++)
6323 pDst->aXMM[i] = pSrc->aXMM[i];
6324 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
6325 * right? */
6326 }
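 /* Illustrative note: the condition above models AMD's fast FXSAVE/FXRSTOR
    (EFER.FFXSR): when it is set and we are at CPL 0 in 64-bit mode, the XMM
    register array is skipped, leaving that part of the 512-byte image untouched. */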
6327
6328 /*
6329 * Commit the memory.
6330 */
6331 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6332 if (rcStrict != VINF_SUCCESS)
6333 return rcStrict;
6334
6335 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6336 return VINF_SUCCESS;
6337}
6338
6339
6340/**
6341 * Implements 'FXRSTOR'.
6342 *
6343 * @param iEffSeg The effective segment register for @a GCPtrEff.
 * @param GCPtrEff The address of the image.
6344 * @param enmEffOpSize The operand size (only REX.W really matters).
6345 */
6346IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6347{
6348 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6349
6350 /*
6351 * Raise exceptions.
6352 */
6353 if (pCtx->cr0 & X86_CR0_EM)
6354 return iemRaiseUndefinedOpcode(pIemCpu);
6355 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6356 return iemRaiseDeviceNotAvailable(pIemCpu);
6357 if (GCPtrEff & 15)
6358 {
6359 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6360 * all/any misalignment sizes; Intel says it's an implementation detail. */
6361 if ( (pCtx->cr0 & X86_CR0_AM)
6362 && pCtx->eflags.Bits.u1AC
6363 && pIemCpu->uCpl == 3)
6364 return iemRaiseAlignmentCheckException(pIemCpu);
6365 return iemRaiseGeneralProtectionFault0(pIemCpu);
6366 }
6367
6368 /*
6369 * Access the memory.
6370 */
6371 void *pvMem512;
6372 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
6373 if (rcStrict != VINF_SUCCESS)
6374 return rcStrict;
6375 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
6376 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
6377
6378 /*
6379 * Check the state for stuff which will #GP(0).
6380 */
6381 uint32_t const fMXCSR = pSrc->MXCSR;
6382 uint32_t const fMXCSR_MASK = pDst->MXCSR_MASK ? pDst->MXCSR_MASK : UINT32_C(0xffbf);
6383 if (fMXCSR & ~fMXCSR_MASK)
6384 {
6385 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
6386 return iemRaiseGeneralProtectionFault0(pIemCpu);
6387 }
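 /* Illustrative note: a saved MXCSR_MASK of zero is treated as the architectural
    default 0xffbf (everything valid except DAZ, bit 6), so e.g. restoring the reset
    value MXCSR=0x1f80 always passes, while any of the reserved bits 31:16 being set
    raises #GP(0). */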
6388
6389 /*
6390 * Load the registers.
6391 */
6392 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6393 * specific whether MXCSR and XMM0-XMM7 are restored. */
6394
6395 /* common for all formats */
6396 pDst->FCW = pSrc->FCW;
6397 pDst->FSW = pSrc->FSW;
6398 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6399 pDst->FOP = pSrc->FOP;
6400 pDst->MXCSR = fMXCSR;
6401 /* (MXCSR_MASK is read-only) */
6402 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
6403 {
6404 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6405 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6406 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6407 pDst->aRegs[i].au32[3] = 0;
6408 }
6409
6410 /* FPU IP, CS, DP and DS. */
6411 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6412 {
6413 pDst->FPUIP = pSrc->FPUIP;
6414 pDst->CS = pSrc->CS;
6415 pDst->Rsrvd1 = pSrc->Rsrvd1;
6416 pDst->FPUDP = pSrc->FPUDP;
6417 pDst->DS = pSrc->DS;
6418 pDst->Rsrvd2 = pSrc->Rsrvd2;
6419 }
6420 else
6421 {
6422 pDst->FPUIP = pSrc->FPUIP;
6423 pDst->CS = pSrc->CS;
6424 pDst->Rsrvd1 = 0;
6425 pDst->FPUDP = pSrc->FPUDP;
6426 pDst->DS = pSrc->DS;
6427 pDst->Rsrvd2 = 0;
6428 }
6429
6430 /* XMM registers. */
6431 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6432 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6433 || pIemCpu->uCpl != 0)
6434 {
6435 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6436 for (uint32_t i = 0; i < cXmmRegs; i++)
6437 pDst->aXMM[i] = pSrc->aXMM[i];
6438 }
6439
6440 /*
6441 * Commit the memory.
6442 */
6443 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
6444 if (rcStrict != VINF_SUCCESS)
6445 return rcStrict;
6446
6447 iemHlpUsedFpu(pIemCpu);
6448 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6449 return VINF_SUCCESS;
6450}
6451
6452
6453/**
6454 * Common routine for fnstenv and fnsave.
6455 *
6456 * @param uPtr Where to store the state.
6457 * @param pCtx The CPU context.
6458 */
6459static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
6460{
6461 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
6462 if (enmEffOpSize == IEMMODE_16BIT)
6463 {
6464 uPtr.pu16[0] = pSrcX87->FCW;
6465 uPtr.pu16[1] = pSrcX87->FSW;
6466 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
6467 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6468 {
6469 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
6470 * protected mode or long mode and we save it in real mode? And vice
6471 * versa? And with 32-bit operand size? I think the CPU is storing the
6472 * effective address ((CS << 4) + IP) in the offset register and not
6473 * doing any address calculations here. */
6474 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
6475 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
6476 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
6477 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
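 /* Illustrative example: with a 20-bit linear FPUIP of 0x12345, the stored IP
    offset is 0x2345 and word 4 packs the upper linear bits together with the
    opcode: ((0x12345 >> 4) & 0xf000) | FOP = 0x1000 | FOP. */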
6478 }
6479 else
6480 {
6481 uPtr.pu16[3] = pSrcX87->FPUIP;
6482 uPtr.pu16[4] = pSrcX87->CS;
6483 uPtr.pu16[5] = pSrcX87->FPUDP;
6484 uPtr.pu16[6] = pSrcX87->DS;
6485 }
6486 }
6487 else
6488 {
6489 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
6490 uPtr.pu16[0*2] = pSrcX87->FCW;
6491 uPtr.pu16[1*2] = pSrcX87->FSW;
6492 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
6493 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6494 {
6495 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
6496 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
6497 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
6498 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
6499 }
6500 else
6501 {
6502 uPtr.pu32[3] = pSrcX87->FPUIP;
6503 uPtr.pu16[4*2] = pSrcX87->CS;
6504 uPtr.pu16[4*2+1]= pSrcX87->FOP;
6505 uPtr.pu32[5] = pSrcX87->FPUDP;
6506 uPtr.pu16[6*2] = pSrcX87->DS;
6507 }
6508 }
6509}
6510
6511
6512/**
6513 * Common routine for fldenv and frstor.
6514 *
6515 * @param uPtr Where to store the state.
6516 * @param pCtx The CPU context.
6517 */
6518static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
6519{
6520 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
6521 if (enmEffOpSize == IEMMODE_16BIT)
6522 {
6523 pDstX87->FCW = uPtr.pu16[0];
6524 pDstX87->FSW = uPtr.pu16[1];
6525 pDstX87->FTW = uPtr.pu16[2];
6526 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6527 {
6528 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
6529 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
6530 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
6531 pDstX87->CS = 0;
6532 pDstX87->Rsrvd1= 0;
6533 pDstX87->DS = 0;
6534 pDstX87->Rsrvd2= 0;
6535 }
6536 else
6537 {
6538 pDstX87->FPUIP = uPtr.pu16[3];
6539 pDstX87->CS = uPtr.pu16[4];
6540 pDstX87->Rsrvd1= 0;
6541 pDstX87->FPUDP = uPtr.pu16[5];
6542 pDstX87->DS = uPtr.pu16[6];
6543 pDstX87->Rsrvd2= 0;
6544 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
6545 }
6546 }
6547 else
6548 {
6549 pDstX87->FCW = uPtr.pu16[0*2];
6550 pDstX87->FSW = uPtr.pu16[1*2];
6551 pDstX87->FTW = uPtr.pu16[2*2];
6552 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6553 {
6554 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
6555 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
6556 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
6557 pDstX87->CS = 0;
6558 pDstX87->Rsrvd1= 0;
6559 pDstX87->DS = 0;
6560 pDstX87->Rsrvd2= 0;
6561 }
6562 else
6563 {
6564 pDstX87->FPUIP = uPtr.pu32[3];
6565 pDstX87->CS = uPtr.pu16[4*2];
6566 pDstX87->Rsrvd1= 0;
6567 pDstX87->FOP = uPtr.pu16[4*2+1];
6568 pDstX87->FPUDP = uPtr.pu32[5];
6569 pDstX87->DS = uPtr.pu16[6*2];
6570 pDstX87->Rsrvd2= 0;
6571 }
6572 }
6573
6574 /* Make adjustments. */
6575 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
6576 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
6577 iemFpuRecalcExceptionStatus(pDstX87);
6578 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
6579 * exceptions are pending after loading the saved state? */
6580}
6581
6582
6583/**
6584 * Implements 'FNSTENV'.
6585 *
6586 * @param enmEffOpSize The operand size (only REX.W really matters).
6587 * @param iEffSeg The effective segment register for @a GCPtrEff.
6588 * @param GCPtrEffDst The address of the image.
6589 */
6590IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6591{
6592 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6593 RTPTRUNION uPtr;
6594 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6595 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6596 if (rcStrict != VINF_SUCCESS)
6597 return rcStrict;
6598
6599 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6600
6601 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6602 if (rcStrict != VINF_SUCCESS)
6603 return rcStrict;
6604
6605 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6606 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6607 return VINF_SUCCESS;
6608}
6609
6610
6611/**
6612 * Implements 'FNSAVE'.
6613 *
6614 * @param enmEffOpSize The operand size.
6615 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
 * @param GCPtrEffDst The address of the image.
6616 */
6617IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6618{
6619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6620 RTPTRUNION uPtr;
6621 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6622 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6623 if (rcStrict != VINF_SUCCESS)
6624 return rcStrict;
6625
6626 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6627 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6628 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6629 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6630 {
6631 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
6632 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
6633 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
6634 }
6635
6636 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6637 if (rcStrict != VINF_SUCCESS)
6638 return rcStrict;
6639
6640 /*
6641 * Re-initialize the FPU context.
6642 */
6643 pFpuCtx->FCW = 0x37f;
6644 pFpuCtx->FSW = 0;
6645 pFpuCtx->FTW = 0x00; /* 0 - empty */
6646 pFpuCtx->FPUDP = 0;
6647 pFpuCtx->DS = 0;
6648 pFpuCtx->Rsrvd2= 0;
6649 pFpuCtx->FPUIP = 0;
6650 pFpuCtx->CS = 0;
6651 pFpuCtx->Rsrvd1= 0;
6652 pFpuCtx->FOP = 0;
6653
6654 iemHlpUsedFpu(pIemCpu);
6655 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6656 return VINF_SUCCESS;
6657}
6658
6659
6660
6661/**
6662 * Implements 'FLDENV'.
6663 *
6664 * @param enmEffOpSize The operand size (only REX.W really matters).
6665 * @param iEffSeg The effective segment register for @a GCPtrEff.
6666 * @param GCPtrEffSrc The address of the image.
6667 */
6668IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6669{
6670 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6671 RTCPTRUNION uPtr;
6672 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6673 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6674 if (rcStrict != VINF_SUCCESS)
6675 return rcStrict;
6676
6677 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6678
6679 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6680 if (rcStrict != VINF_SUCCESS)
6681 return rcStrict;
6682
6683 iemHlpUsedFpu(pIemCpu);
6684 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6685 return VINF_SUCCESS;
6686}
6687
6688
6689/**
6690 * Implements 'FRSTOR'.
6691 *
6692 * @param enmEffOpSize The operand size.
6693 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
 * @param GCPtrEffSrc The address of the image.
6694 */
6695IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6696{
6697 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6698 RTCPTRUNION uPtr;
6699 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6700 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6701 if (rcStrict != VINF_SUCCESS)
6702 return rcStrict;
6703
6704 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6705 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6706 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6707 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6708 {
6709 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
6710 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
6711 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
6712 pFpuCtx->aRegs[i].au32[3] = 0;
6713 }
6714
6715 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6716 if (rcStrict != VINF_SUCCESS)
6717 return rcStrict;
6718
6719 iemHlpUsedFpu(pIemCpu);
6720 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6721 return VINF_SUCCESS;
6722}
6723
6724
6725/**
6726 * Implements 'FLDCW'.
6727 *
6728 * @param u16Fcw The new FCW.
6729 */
6730IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
6731{
6732 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6733
6734 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
6735 /** @todo Testcase: Try to see what happens when trying to set undefined bits
6736 * (other than 6 and 7). Currently ignoring them. */
6737 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
6738 * according to FSW. (This is what is currently implemented.) */
6739 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6740 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
6741 iemFpuRecalcExceptionStatus(pFpuCtx);
6742
6743 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6744 iemHlpUsedFpu(pIemCpu);
6745 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6746 return VINF_SUCCESS;
6747}
6748
6749
6750
6751/**
6752 * Implements the underflow case of fxch.
6753 *
6754 * @param iStReg The other stack register.
6755 */
6756IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
6757{
6758 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6759
6760 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6761 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
6762 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6763 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
6764
6765 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
6766 * registers are read as QNaN and then exchanged. This could be
6767 * wrong... */
6768 if (pFpuCtx->FCW & X86_FCW_IM)
6769 {
6770 if (RT_BIT(iReg1) & pFpuCtx->FTW)
6771 {
6772 if (RT_BIT(iReg2) & pFpuCtx->FTW)
6773 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6774 else
6775 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
6776 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6777 }
6778 else
6779 {
6780 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
6781 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6782 }
6783 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6784 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6785 }
6786 else
6787 {
6788 /* raise underflow exception, don't change anything. */
6789 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
6790 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6791 }
6792
6793 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6794 iemHlpUsedFpu(pIemCpu);
6795 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6796 return VINF_SUCCESS;
6797}
6798
6799
6800/**
6801 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
6802 *
6803 * @param iStReg The other stack register.
 * @param pfnAImpl The assembly comparison implementation to invoke.
 * @param fPop Whether to pop the register off the stack afterwards.
6804 */
6805IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
6806{
6807 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6808 Assert(iStReg < 8);
6809
6810 /*
6811 * Raise exceptions.
6812 */
6813 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6814 return iemRaiseDeviceNotAvailable(pIemCpu);
6815
6816 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6817 uint16_t u16Fsw = pFpuCtx->FSW;
6818 if (u16Fsw & X86_FSW_ES)
6819 return iemRaiseMathFault(pIemCpu);
6820
6821 /*
6822 * Check if any of the register accesses causes #SF + #IA.
6823 */
6824 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
6825 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6826 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
6827 {
6828 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
6830
6831 pFpuCtx->FSW &= ~X86_FSW_C1;
6832 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
6833 if ( !(u16Fsw & X86_FSW_IE)
6834 || (pFpuCtx->FCW & X86_FCW_IM) )
6835 {
6836 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6837 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6838 }
6839 }
6840 else if (pFpuCtx->FCW & X86_FCW_IM)
6841 {
6842 /* Masked underflow. */
6843 pFpuCtx->FSW &= ~X86_FSW_C1;
6844 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6845 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6846 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
6847 }
6848 else
6849 {
6850 /* Raise underflow - don't touch EFLAGS or TOP. */
6851 pFpuCtx->FSW &= ~X86_FSW_C1;
6852 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6853 fPop = false;
6854 }
6855
6856 /*
6857 * Pop if necessary.
6858 */
6859 if (fPop)
6860 {
6861 pFpuCtx->FTW &= ~RT_BIT(iReg1);
6862 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
6863 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
6864 }
6865
6866 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6867 iemHlpUsedFpu(pIemCpu);
6868 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6869 return VINF_SUCCESS;
6870}
6871
6872/** @} */
6873