VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@60999

Last change on this file since 60999 was 60881, checked in by vboxsync, 9 years ago

iret-to-v86: only 16-bit eip, thank you.

1/* $Id: IEMAllCImpl.cpp.h 60881 2016-05-08 15:35:21Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23
24/**
25 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
26 *
27 * @returns Strict VBox status code.
28 *
29 * @param pIemCpu The IEM per CPU data.
30 * @param pCtx The register context.
31 * @param u16Port The port number.
32 * @param cbOperand The operand size.
33 */
34static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
35{
36 /* The TSS bits we're interested in are the same on 386 and AMD64. */
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
38 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
39 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
40 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
41
42 /*
43 * Check the TSS type; 16-bit TSSes don't have any I/O permission bitmap.
44 */
45 Assert(!pCtx->tr.Attr.n.u1DescType);
46 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
47 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
48 {
49 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
50 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
51 return iemRaiseGeneralProtectionFault0(pIemCpu);
52 }
53
54 /*
55 * Read the bitmap offset (may #PF).
56 */
57 uint16_t offBitmap;
58 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offBitmap, UINT8_MAX,
59 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
60 if (rcStrict != VINF_SUCCESS)
61 {
62 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
63 return rcStrict;
64 }
65
66 /*
67 * The access covers the bit range from u16Port to (u16Port + cbOperand - 1);
68 * however, Intel describes the CPU as actually reading two bytes regardless of
69 * whether the bit range crosses a byte boundary. Thus the + 1 in the test below.
70 */
71 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
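    /* Illustrative example (not from the original source): for u16Port=0x1F7 and
       cbOperand=2, offFirstBit = 0x1F7 / 8 + offBitmap = offBitmap + 0x3E. The two
       permission bits are bit 7 of that byte and bit 0 of the following byte, which
       is why two bytes are fetched below and why the limit test uses offFirstBit + 1. */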
72 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
73 * for instance, sizeof(X86TSS32). */
74 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
75 {
76 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
77 offFirstBit, pCtx->tr.u32Limit));
78 return iemRaiseGeneralProtectionFault0(pIemCpu);
79 }
80
81 /*
82 * Read the necessary bits.
83 */
84 /** @todo Test the assertion in the Intel manual that the CPU reads two
85 * bytes. The question is how this works wrt #PF and #GP on the
86 * 2nd byte when it's not required. */
87 uint16_t bmBytes = UINT16_MAX;
88 rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
89 if (rcStrict != VINF_SUCCESS)
90 {
91 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
92 return rcStrict;
93 }
94
95 /*
96 * Perform the check.
97 */
98 uint16_t fPortMask = (1 << cbOperand) - 1;
99 bmBytes >>= (u16Port & 7);
100 if (bmBytes & fPortMask)
101 {
102 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
103 u16Port, cbOperand, bmBytes, fPortMask));
104 return iemRaiseGeneralProtectionFault0(pIemCpu);
105 }
106
107 return VINF_SUCCESS;
108}
109
110
111/**
112 * Checks if we are allowed to access the given I/O port, raising the
113 * appropriate exceptions if we aren't (or if the I/O bitmap is not
114 * accessible).
115 *
116 * @returns Strict VBox status code.
117 *
118 * @param pIemCpu The IEM per CPU data.
119 * @param pCtx The register context.
120 * @param u16Port The port number.
121 * @param cbOperand The operand size.
122 */
123DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
124{
125 X86EFLAGS Efl;
126 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
127 if ( (pCtx->cr0 & X86_CR0_PE)
128 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
129 || Efl.Bits.u1VM) )
130 return iemHlpCheckPortIOPermissionBitmap(pIemCpu, pCtx, u16Port, cbOperand);
131 return VINF_SUCCESS;
132}
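
/* Note (added for clarity, not in the original source): the bitmap lookup above is
   only needed when CR0.PE is set and either CPL > IOPL or we're in V86 mode. In real
   mode, or in protected mode with CPL <= IOPL, IN/OUT are permitted without
   consulting the TSS I/O permission bitmap. */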
133
134
135#if 0
136/**
137 * Calculates the parity bit.
138 *
139 * @returns true if the bit is set, false if not.
140 * @param u8Result The least significant byte of the result.
141 */
142static bool iemHlpCalcParityFlag(uint8_t u8Result)
143{
144 /*
145 * Parity is set if the number of bits in the least significant byte of
146 * the result is even.
147 */
148 uint8_t cBits;
149 cBits = u8Result & 1; /* 0 */
150 u8Result >>= 1;
151 cBits += u8Result & 1;
152 u8Result >>= 1;
153 cBits += u8Result & 1;
154 u8Result >>= 1;
155 cBits += u8Result & 1;
156 u8Result >>= 1;
157 cBits += u8Result & 1; /* 4 */
158 u8Result >>= 1;
159 cBits += u8Result & 1;
160 u8Result >>= 1;
161 cBits += u8Result & 1;
162 u8Result >>= 1;
163 cBits += u8Result & 1;
164 return !(cBits & 1);
165}
166#endif /* not used */
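
#if 0 /* Alternative sketch (not part of the original source): the same parity
       * result can be computed by XOR-folding the byte onto itself, so that bit 0
       * ends up holding the XOR of all eight bits (1 = odd number of set bits). */
static bool iemHlpCalcParityFlagXorFold(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;  /* fold the high nibble onto the low nibble */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;
    return !(u8Result & 1);     /* PF is set when the set-bit count is even */
}
#endif /* not used either */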
167
168
169/**
170 * Updates the specified flags according to an 8-bit result.
171 *
172 * @param pIemCpu The IEM state of the calling EMT.
173 * @param u8Result The result to set the flags according to.
174 * @param fToUpdate The flags to update.
175 * @param fUndefined The flags that are specified as undefined.
176 */
177static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
178{
179 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
180
181 uint32_t fEFlags = pCtx->eflags.u;
182 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
183 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
184 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
185#ifdef IEM_VERIFICATION_MODE_FULL
186 pIemCpu->fUndefinedEFlags |= fUndefined;
187#endif
188}
189
190
191/**
192 * Helper used by iret.
193 *
194 * @param uCpl The new CPL.
195 * @param pSReg Pointer to the segment register.
196 */
197static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
198{
199#ifdef VBOX_WITH_RAW_MODE_NOT_R0
200 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
201 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
202#else
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
204#endif
205
206 if ( uCpl > pSReg->Attr.n.u2Dpl
207 && pSReg->Attr.n.u1DescType /* code or data, not system */
208 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
210 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, 0);
211}
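
/* Worked example (added for clarity, not in the original source): on an IRET that
   drops from CPL 0 to CPL 3, a data selector left in DS/ES/FS/GS with DPL 0 fails
   the "uCpl > DPL" test above and is therefore replaced with a NULL selector,
   matching the IRET rule of nulling data segments the new CPL may not use.
   Conforming code segments are exempt, as the check above shows. */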
212
213
214/**
215 * Indicates that we have modified the FPU state.
216 *
217 * @param pIemCpu The IEM state of the calling EMT.
218 */
219DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
220{
221 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
222}
223
224/** @} */
225
226/** @name C Implementations
227 * @{
228 */
229
230/**
231 * Implements a 16-bit popa.
232 */
233IEM_CIMPL_DEF_0(iemCImpl_popa_16)
234{
235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
236 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
237 RTGCPTR GCPtrLast = GCPtrStart + 15;
238 VBOXSTRICTRC rcStrict;
239
240 /*
241 * The docs are a bit hard to comprehend here, but it looks like we wrap
242 * around in real mode as long as none of the individual pops crosses the
243 * end of the stack segment. In protected mode we check the whole access
244 * in one go. For efficiency, only do the word-by-word thing if we're in
245 * danger of wrapping around.
246 */
247 /** @todo do popa boundary / wrap-around checks. */
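    /* Illustrative example (not from the original source): in real mode with
       SP=0xFFF8, GCPtrLast = 0xFFF8 + 15 = 0x10007 exceeds the segment limit
       (typically 0xFFFF), so the word-by-word path below is taken and the 16-bit
       stack pointer is allowed to wrap within the 64KB segment instead of faulting. */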
248 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
249 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
250 {
251 /* word-by-word */
252 RTUINT64U TmpRsp;
253 TmpRsp.u = pCtx->rsp;
254 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 {
261 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
262 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
263 }
264 if (rcStrict == VINF_SUCCESS)
265 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
266 if (rcStrict == VINF_SUCCESS)
267 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 {
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint16_t const *pa16Mem = NULL;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
283 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
284 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
285 /* skip sp */
286 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
287 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
288 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
289 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pIemCpu, pCtx, 16);
294 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
300
301
302/**
303 * Implements a 32-bit popa.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_popa_32)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
309 RTGCPTR GCPtrLast = GCPtrStart + 31;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual pops crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do popa boundary / wrap-around checks. */
320 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
321 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 {
333 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
334 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
335 }
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 {
344#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
345 pCtx->rdi &= UINT32_MAX;
346 pCtx->rsi &= UINT32_MAX;
347 pCtx->rbp &= UINT32_MAX;
348 pCtx->rbx &= UINT32_MAX;
349 pCtx->rdx &= UINT32_MAX;
350 pCtx->rcx &= UINT32_MAX;
351 pCtx->rax &= UINT32_MAX;
352#endif
353 pCtx->rsp = TmpRsp.u;
354 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
355 }
356 }
357 else
358 {
359 uint32_t const *pa32Mem;
360 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
361 if (rcStrict == VINF_SUCCESS)
362 {
363 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
364 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
365 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
366 /* skip esp */
367 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
368 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
369 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
370 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
371 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
372 if (rcStrict == VINF_SUCCESS)
373 {
374 iemRegAddToRsp(pIemCpu, pCtx, 32);
375 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
376 }
377 }
378 }
379 return rcStrict;
380}
381
382
383/**
384 * Implements a 16-bit pusha.
385 */
386IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
387{
388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
389 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
390 RTGCPTR GCPtrBottom = GCPtrTop - 15;
391 VBOXSTRICTRC rcStrict;
392
393 /*
394 * The docs are a bit hard to comprehend here, but it looks like we wrap
395 * around in real mode as long as none of the individual pushes crosses the
396 * end of the stack segment. In protected mode we check the whole access
397 * in one go. For efficiency, only do the word-by-word thing if we're in
398 * danger of wrapping around.
399 */
400 /** @todo do pusha boundary / wrap-around checks. */
401 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
402 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
403 {
404 /* word-by-word */
405 RTUINT64U TmpRsp;
406 TmpRsp.u = pCtx->rsp;
407 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 {
424 pCtx->rsp = TmpRsp.u;
425 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
426 }
427 }
428 else
429 {
430 GCPtrBottom--;
431 uint16_t *pa16Mem = NULL;
432 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
433 if (rcStrict == VINF_SUCCESS)
434 {
435 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
436 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
437 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
438 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
439 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
440 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
441 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
442 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
443 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 iemRegSubFromRsp(pIemCpu, pCtx, 16);
447 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
448 }
449 }
450 }
451 return rcStrict;
452}
453
454
455/**
456 * Implements a 32-bit pusha.
457 */
458IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
459{
460 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
461 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
462 RTGCPTR GCPtrBottom = GCPtrTop - 31;
463 VBOXSTRICTRC rcStrict;
464
465 /*
466 * The docs are a bit hard to comprehend here, but it looks like we wrap
467 * around in real mode as long as none of the individual pushes crosses the
468 * end of the stack segment. In protected mode we check the whole access
469 * in one go. For efficiency, only do the word-by-word thing if we're in
470 * danger of wrapping around.
471 */
472 /** @todo do pusha boundary / wrap-around checks. */
473 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
474 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
475 {
476 /* word-by-word */
477 RTUINT64U TmpRsp;
478 TmpRsp.u = pCtx->rsp;
479 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
480 if (rcStrict == VINF_SUCCESS)
481 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
482 if (rcStrict == VINF_SUCCESS)
483 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 {
496 pCtx->rsp = TmpRsp.u;
497 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
498 }
499 }
500 else
501 {
502 GCPtrBottom--;
503 uint32_t *pa32Mem;
504 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
505 if (rcStrict == VINF_SUCCESS)
506 {
507 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
508 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
509 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
510 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
511 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
512 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
513 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
514 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
515 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 iemRegSubFromRsp(pIemCpu, pCtx, 32);
519 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
520 }
521 }
522 }
523 return rcStrict;
524}
525
526
527/**
528 * Implements pushf.
529 *
530 *
531 * @param enmEffOpSize The effective operand size.
532 */
533IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
534{
535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
536
537 /*
538 * If we're in V8086 mode some care is required (which is why we're
539 * doing this in a C implementation).
540 */
541 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
542 if ( (fEfl & X86_EFL_VM)
543 && X86_EFL_GET_IOPL(fEfl) != 3 )
544 {
545 Assert(pCtx->cr0 & X86_CR0_PE);
546 if ( enmEffOpSize != IEMMODE_16BIT
547 || !(pCtx->cr4 & X86_CR4_VME))
548 return iemRaiseGeneralProtectionFault0(pIemCpu);
549 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
550 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
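    /* Note (added for clarity): X86_EFL_VIF is bit 19 and X86_EFL_IF is bit 9, so the
       shift above copies the virtual interrupt flag into the IF position of the
       16-bit image that gets pushed, as the VME rules require. */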
551 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
552 }
553
554 /*
555 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
556 */
557 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
558
559 VBOXSTRICTRC rcStrict;
560 switch (enmEffOpSize)
561 {
562 case IEMMODE_16BIT:
563 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
564 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_186)
565 fEfl |= UINT16_C(0xf000);
566 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
567 break;
568 case IEMMODE_32BIT:
569 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
570 break;
571 case IEMMODE_64BIT:
572 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
573 break;
574 IEM_NOT_REACHED_DEFAULT_CASE_RET();
575 }
576 if (rcStrict != VINF_SUCCESS)
577 return rcStrict;
578
579 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
580 return VINF_SUCCESS;
581}
582
583
584/**
585 * Implements popf.
586 *
587 * @param enmEffOpSize The effective operand size.
588 */
589IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
590{
591 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
592 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
593 VBOXSTRICTRC rcStrict;
594 uint32_t fEflNew;
595
596 /*
597 * V8086 is special as usual.
598 */
599 if (fEflOld & X86_EFL_VM)
600 {
601 /*
602 * Almost anything goes if IOPL is 3.
603 */
604 if (X86_EFL_GET_IOPL(fEflOld) == 3)
605 {
606 switch (enmEffOpSize)
607 {
608 case IEMMODE_16BIT:
609 {
610 uint16_t u16Value;
611 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
612 if (rcStrict != VINF_SUCCESS)
613 return rcStrict;
614 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
615 break;
616 }
617 case IEMMODE_32BIT:
618 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
619 if (rcStrict != VINF_SUCCESS)
620 return rcStrict;
621 break;
622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
623 }
624
625 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
626 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
627 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
628 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
629 }
630 /*
631 * Interrupt flag virtualization with CR4.VME=1.
632 */
633 else if ( enmEffOpSize == IEMMODE_16BIT
634 && (pCtx->cr4 & X86_CR4_VME) )
635 {
636 uint16_t u16Value;
637 RTUINT64U TmpRsp;
638 TmpRsp.u = pCtx->rsp;
639 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
640 if (rcStrict != VINF_SUCCESS)
641 return rcStrict;
642
643 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
644 * or before? */
645 if ( ( (u16Value & X86_EFL_IF)
646 && (fEflOld & X86_EFL_VIP))
647 || (u16Value & X86_EFL_TF) )
648 return iemRaiseGeneralProtectionFault0(pIemCpu);
649
650 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
651 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
652 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
653 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
654
655 pCtx->rsp = TmpRsp.u;
656 }
657 else
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 }
661 /*
662 * Not in V8086 mode.
663 */
664 else
665 {
666 /* Pop the flags. */
667 switch (enmEffOpSize)
668 {
669 case IEMMODE_16BIT:
670 {
671 uint16_t u16Value;
672 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
673 if (rcStrict != VINF_SUCCESS)
674 return rcStrict;
675 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
676
677 /*
678 * Ancient CPU adjustments:
679 * - 8086, 80186, V20/30:
680 * Fixed bits 15:12 are not kept correctly internally, mostly for
681 * practical reasons (masking below). We add them when pushing flags.
682 * - 80286:
683 * The NT and IOPL flags cannot be popped from real mode and are
684 * therefore always zero (since a 286 can never exit from PM and
685 * their initial value is zero). This changed on a 386 and can
686 * therefore be used to detect 286 or 386 CPU in real mode.
687 */
688 if ( IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_286
689 && !(pCtx->cr0 & X86_CR0_PE) )
690 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
691 break;
692 }
693 case IEMMODE_32BIT:
694 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
695 if (rcStrict != VINF_SUCCESS)
696 return rcStrict;
697 break;
698 case IEMMODE_64BIT:
699 {
700 uint64_t u64Value;
701 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
702 if (rcStrict != VINF_SUCCESS)
703 return rcStrict;
704 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
705 break;
706 }
707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
708 }
709
710 /* Merge them with the current flags. */
711 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
712 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
713 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
714 || pIemCpu->uCpl == 0)
715 {
716 fEflNew &= fPopfBits;
717 fEflNew |= ~fPopfBits & fEflOld;
718 }
719 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
720 {
721 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
722 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
723 }
724 else
725 {
726 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
727 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
728 }
729 }
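
    /* Worked example (added for clarity, not in the original source): with IOPL=0 and
       CPL=3, POPF cannot change IF or IOPL; both are masked out of fEflNew and
       re-taken from fEflOld in the branches above. At CPL 0 the first branch applies
       and every bit permitted by fPopfBits can be changed. */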
730
731 /*
732 * Commit the flags.
733 */
734 Assert(fEflNew & RT_BIT_32(1));
735 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
736 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
737
738 return VINF_SUCCESS;
739}
740
741
742/**
743 * Implements a 16-bit indirect call.
744 *
745 * @param uNewPC The new program counter (RIP) value (loaded from the
746 * operand).
747 * @param enmEffOpSize The effective operand size.
748 */
749IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
750{
751 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
752 uint16_t uOldPC = pCtx->ip + cbInstr;
753 if (uNewPC > pCtx->cs.u32Limit)
754 return iemRaiseGeneralProtectionFault0(pIemCpu);
755
756 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
757 if (rcStrict != VINF_SUCCESS)
758 return rcStrict;
759
760 pCtx->rip = uNewPC;
761 pCtx->eflags.Bits.u1RF = 0;
762 return VINF_SUCCESS;
763}
764
765
766/**
767 * Implements a 16-bit relative call.
768 *
769 * @param offDisp The displacement offset.
770 */
771IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
772{
773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
774 uint16_t uOldPC = pCtx->ip + cbInstr;
775 uint16_t uNewPC = uOldPC + offDisp;
776 if (uNewPC > pCtx->cs.u32Limit)
777 return iemRaiseGeneralProtectionFault0(pIemCpu);
778
779 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
780 if (rcStrict != VINF_SUCCESS)
781 return rcStrict;
782
783 pCtx->rip = uNewPC;
784 pCtx->eflags.Bits.u1RF = 0;
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Implements a 32-bit indirect call.
791 *
792 * @param uNewPC The new program counter (RIP) value (loaded from the
793 * operand).
794 * @param enmEffOpSize The effective operand size.
795 */
796IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 uint32_t uOldPC = pCtx->eip + cbInstr;
800 if (uNewPC > pCtx->cs.u32Limit)
801 return iemRaiseGeneralProtectionFault0(pIemCpu);
802
803 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
804 if (rcStrict != VINF_SUCCESS)
805 return rcStrict;
806
807#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
808 /*
809 * CSAM hook for recording interesting indirect calls.
810 */
811 if ( !pCtx->eflags.Bits.u1IF
812 && (pCtx->cr0 & X86_CR0_PG)
813 && !CSAMIsEnabled(IEMCPU_TO_VM(pIemCpu))
814 && pIemCpu->uCpl == 0)
815 {
816 EMSTATE enmState = EMGetState(IEMCPU_TO_VMCPU(pIemCpu));
817 if ( enmState == EMSTATE_IEM_THEN_REM
818 || enmState == EMSTATE_IEM
819 || enmState == EMSTATE_REM)
820 CSAMR3RecordCallAddress(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
821 }
822#endif
823
824 pCtx->rip = uNewPC;
825 pCtx->eflags.Bits.u1RF = 0;
826 return VINF_SUCCESS;
827}
828
829
830/**
831 * Implements a 32-bit relative call.
832 *
833 * @param offDisp The displacement offset.
834 */
835IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
836{
837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
838 uint32_t uOldPC = pCtx->eip + cbInstr;
839 uint32_t uNewPC = uOldPC + offDisp;
840 if (uNewPC > pCtx->cs.u32Limit)
841 return iemRaiseGeneralProtectionFault0(pIemCpu);
842
843 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
844 if (rcStrict != VINF_SUCCESS)
845 return rcStrict;
846
847 pCtx->rip = uNewPC;
848 pCtx->eflags.Bits.u1RF = 0;
849 return VINF_SUCCESS;
850}
851
852
853/**
854 * Implements a 64-bit indirect call.
855 *
856 * @param uNewPC The new program counter (RIP) value (loaded from the
857 * operand).
858 * @param enmEffOpSize The effective operand size.
859 */
860IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
861{
862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
863 uint64_t uOldPC = pCtx->rip + cbInstr;
864 if (!IEM_IS_CANONICAL(uNewPC))
865 return iemRaiseGeneralProtectionFault0(pIemCpu);
866
867 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
868 if (rcStrict != VINF_SUCCESS)
869 return rcStrict;
870
871 pCtx->rip = uNewPC;
872 pCtx->eflags.Bits.u1RF = 0;
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Implements a 64-bit relative call.
879 *
880 * @param offDisp The displacement offset.
881 */
882IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
883{
884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
885 uint64_t uOldPC = pCtx->rip + cbInstr;
886 uint64_t uNewPC = uOldPC + offDisp;
887 if (!IEM_IS_CANONICAL(uNewPC))
888 return iemRaiseNotCanonical(pIemCpu);
889
890 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
891 if (rcStrict != VINF_SUCCESS)
892 return rcStrict;
893
894 pCtx->rip = uNewPC;
895 pCtx->eflags.Bits.u1RF = 0;
896 return VINF_SUCCESS;
897}
898
899
900/**
901 * Implements far jumps and calls thru task segments (TSS).
902 *
903 * @param uSel The selector.
904 * @param enmBranch The kind of branching we're performing.
905 * @param enmEffOpSize The effective operand size.
906 * @param pDesc The descriptor corresponding to @a uSel. The type is
907 * task segment.
908 */
909IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
910{
911#ifndef IEM_IMPLEMENTS_TASKSWITCH
912 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
913#else
914 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
915 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
916 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
917
918 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
919 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
920 {
921 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
922 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
923 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
924 }
925
926 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
927 * far calls (see iemCImpl_callf). Most likely in both cases it should be
928 * checked here, need testcases. */
929 if (!pDesc->Legacy.Gen.u1Present)
930 {
931 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
932 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
933 }
934
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t uNextEip = pCtx->eip + cbInstr;
937 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
938 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
939#endif
940}
941
942
943/**
944 * Implements far jumps and calls thru task gates.
945 *
946 * @param uSel The selector.
947 * @param enmBranch The kind of branching we're performing.
948 * @param enmEffOpSize The effective operand size.
949 * @param pDesc The descriptor corresponding to @a uSel. The type is
950 * task gate.
951 */
952IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
953{
954#ifndef IEM_IMPLEMENTS_TASKSWITCH
955 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
956#else
957 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
958
959 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
960 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
961 {
962 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
963 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
965 }
966
967 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
968 * far calls (see iemCImpl_callf). Most likely in both cases it should be
969 * checked here, need testcases. */
970 if (!pDesc->Legacy.Gen.u1Present)
971 {
972 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
973 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
974 }
975
976 /*
977 * Fetch the new TSS descriptor from the GDT.
978 */
979 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
980 if (uSelTss & X86_SEL_LDT)
981 {
982 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
983 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
984 }
985
986 IEMSELDESC TssDesc;
987 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelTss, X86_XCPT_GP);
988 if (rcStrict != VINF_SUCCESS)
989 return rcStrict;
990
991 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
992 {
993 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
994 TssDesc.Legacy.Gate.u4Type));
995 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
996 }
997
998 if (!TssDesc.Legacy.Gate.u1Present)
999 {
1000 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1001 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1002 }
1003
1004 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1005 uint32_t uNextEip = pCtx->eip + cbInstr;
1006 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1007 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1008#endif
1009}
1010
1011
1012/**
1013 * Implements far jumps and calls thru call gates.
1014 *
1015 * @param uSel The selector.
1016 * @param enmBranch The kind of branching we're performing.
1017 * @param enmEffOpSize The effective operand size.
1018 * @param pDesc The descriptor corresponding to @a uSel. The type is
1019 * call gate.
1020 */
1021IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1022{
1023#ifndef IEM_IMPLEMENTS_CALLGATE
1024 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1025#else
1026 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1027 * inter-privilege calls and are much more complex.
1028 *
1029 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1030 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1031 * must be 16-bit or 32-bit.
1032 */
1033 /** @todo: effective operand size is probably irrelevant here, only the
1034 * call gate bitness matters??
1035 */
1036 VBOXSTRICTRC rcStrict;
1037 RTPTRUNION uPtrRet;
1038 uint64_t uNewRsp;
1039 uint64_t uNewRip;
1040 uint64_t u64Base;
1041 uint32_t cbLimit;
1042 RTSEL uNewCS;
1043 IEMSELDESC DescCS;
1044 PCPUMCTX pCtx;
1045
1046 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1047 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1048 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1049 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1050
1051 /* Determine the new instruction pointer from the gate descriptor. */
1052 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1053 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1054 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1055
1056 /* Perform DPL checks on the gate descriptor. */
1057 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
1058 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1059 {
1060 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1061 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
1062 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1063 }
1064
1065 /** @todo does this catch NULL selectors, too? */
1066 if (!pDesc->Legacy.Gen.u1Present)
1067 {
1068 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1069 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1070 }
1071
1072 /*
1073 * Fetch the target CS descriptor from the GDT or LDT.
1074 */
1075 uNewCS = pDesc->Legacy.Gate.u16Sel;
1076 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_GP);
1077 if (rcStrict != VINF_SUCCESS)
1078 return rcStrict;
1079
1080 /* Target CS must be a code selector. */
1081 if ( !DescCS.Legacy.Gen.u1DescType
1082 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1083 {
1084 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1085 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1086 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1087 }
1088
1089 /* Privilege checks on target CS. */
1090 if (enmBranch == IEMBRANCH_JUMP)
1091 {
1092 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1093 {
1094 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1095 {
1096 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1097 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1098 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1099 }
1100 }
1101 else
1102 {
1103 if (DescCS.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1104 {
1105 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1106 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1107 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1108 }
1109 }
1110 }
1111 else
1112 {
1113 Assert(enmBranch == IEMBRANCH_CALL);
1114 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1115 {
1116 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1117 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1119 }
1120 }
1121
1122 /* Additional long mode checks. */
1123 if (IEM_IS_LONG_MODE(pIemCpu))
1124 {
1125 if (!DescCS.Legacy.Gen.u1Long)
1126 {
1127 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1129 }
1130
1131 /* L vs D. */
1132 if ( DescCS.Legacy.Gen.u1Long
1133 && DescCS.Legacy.Gen.u1DefBig)
1134 {
1135 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1137 }
1138 }
1139
1140 if (!DescCS.Legacy.Gate.u1Present)
1141 {
1142 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1143 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
1144 }
1145
1146 pCtx = pIemCpu->CTX_SUFF(pCtx);
1147
1148 if (enmBranch == IEMBRANCH_JUMP)
1149 {
1150 /** @todo: This is very similar to regular far jumps; merge! */
1151 /* Jumps are fairly simple... */
1152
1153 /* Chop the high bits off if 16-bit gate (Intel says so). */
1154 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1155 uNewRip = (uint16_t)uNewRip;
1156
1157 /* Limit check for non-long segments. */
1158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1159 if (DescCS.Legacy.Gen.u1Long)
1160 u64Base = 0;
1161 else
1162 {
1163 if (uNewRip > cbLimit)
1164 {
1165 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1166 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1167 }
1168 u64Base = X86DESC_BASE(&DescCS.Legacy);
1169 }
1170
1171 /* Canonical address check. */
1172 if (!IEM_IS_CANONICAL(uNewRip))
1173 {
1174 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1175 return iemRaiseNotCanonical(pIemCpu);
1176 }
1177
1178 /*
1179 * Ok, everything checked out fine. Now set the accessed bit before
1180 * committing the result into CS, CSHID and RIP.
1181 */
1182 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1183 {
1184 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1185 if (rcStrict != VINF_SUCCESS)
1186 return rcStrict;
1187 /** @todo check what VT-x and AMD-V does. */
1188 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1189 }
1190
1191 /* commit */
1192 pCtx->rip = uNewRip;
1193 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1194 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1195 pCtx->cs.ValidSel = pCtx->cs.Sel;
1196 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1197 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1198 pCtx->cs.u32Limit = cbLimit;
1199 pCtx->cs.u64Base = u64Base;
1200 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1201 }
1202 else
1203 {
1204 Assert(enmBranch == IEMBRANCH_CALL);
1205 /* Calls are much more complicated. */
1206
1207 if (DescCS.Legacy.Gen.u2Dpl < pIemCpu->uCpl)
1208 {
1209 uint16_t offNewStack; /* Offset of new stack in TSS. */
1210 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1211 uint8_t uNewCSDpl;
1212 uint8_t cbWords;
1213 RTSEL uNewSS;
1214 RTSEL uOldSS;
1215 uint64_t uOldRsp;
1216 IEMSELDESC DescSS;
1217 RTPTRUNION uPtrTSS;
1218 RTGCPTR GCPtrTSS;
1219 RTPTRUNION uPtrParmWds;
1220 RTGCPTR GCPtrParmWds;
1221
1222 /* More privilege. This is the fun part. */
1223 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1224
1225 /*
1226 * Determine new SS:rSP from the TSS.
1227 */
1228 Assert(!pCtx->tr.Attr.n.u1DescType);
1229
1230 /* Figure out where the new stack pointer is stored in the TSS. */
1231 uNewCSDpl = uNewCS & X86_SEL_RPL;
1232 if (!IEM_IS_LONG_MODE(pIemCpu))
1233 {
1234 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1235 {
1236 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1237 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1238 }
1239 else
1240 {
1241 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1242 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1243 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1244 }
1245 }
1246 else
1247 {
1248 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1249 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1250 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1251 }
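            /* Illustrative example (not from the original source): for a 32-bit TSS and a
               gate targeting CPL 1, offNewStack = 4 + 1 * 8 = 0x0C, i.e. ESP1, with SS1
               following at 0x10, and cbNewStack covers the ESP dword plus the SS word.
               In long mode the RSPn array starts at offset 4, so CPL 2 reads 8 bytes at 0x14. */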
1252
1253 /* Check against TSS limit. */
1254 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1255 {
1256 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1257 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, pCtx->tr.Sel);
1258 }
1259
1260 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1261 rcStrict = iemMemMap(pIemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1262 if (rcStrict != VINF_SUCCESS)
1263 {
1264 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1265 return rcStrict;
1266 }
1267
1268 if (!IEM_IS_LONG_MODE(pIemCpu))
1269 {
1270 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1271 {
1272 uNewRsp = uPtrTSS.pu32[0];
1273 uNewSS = uPtrTSS.pu16[2];
1274 }
1275 else
1276 {
1277 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1278 uNewRsp = uPtrTSS.pu16[0];
1279 uNewSS = uPtrTSS.pu16[1];
1280 }
1281 }
1282 else
1283 {
1284 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1285 /* SS will be a NULL selector, but that's valid. */
1286 uNewRsp = uPtrTSS.pu64[0];
1287 uNewSS = uNewCSDpl;
1288 }
1289
1290 /* Done with the TSS now. */
1291 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1292 if (rcStrict != VINF_SUCCESS)
1293 {
1294 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1295 return rcStrict;
1296 }
1297
1298 /* Only used outside of long mode. */
1299 cbWords = pDesc->Legacy.Gate.u4ParmCount;
1300
1301 /* If EFER.LMA is 0, there's extra work to do. */
1302 if (!IEM_IS_LONG_MODE(pIemCpu))
1303 {
1304 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1305 {
1306 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1307 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1308 }
1309
1310 /* Grab the new SS descriptor. */
1311 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1312 if (rcStrict != VINF_SUCCESS)
1313 return rcStrict;
1314
1315 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1316 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1317 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1318 {
1319 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1320 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1321 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1322 }
1323
1324 /* Ensure new SS is a writable data segment. */
1325 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1326 {
1327 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1328 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1329 }
1330
1331 if (!DescSS.Legacy.Gen.u1Present)
1332 {
1333 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1334 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
1335 }
1336 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1337 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1338 else
1339 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1340 }
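            /* Note (added for clarity, not in the original source): the 4 + cbWords slots
               cover the frame built further down: return (E)IP and CS in the first two
               slots, the cbWords parameter (d)words copied from the old stack, and finally
               the old (E)SP and SS in the last two slots. */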
1341 else
1342 {
1343 /* Just grab the new (NULL) SS descriptor. */
1344 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1345 if (rcStrict != VINF_SUCCESS)
1346 return rcStrict;
1347
1348 cbNewStack = sizeof(uint64_t) * 4;
1349 }
1350
1351 /** @todo: According to Intel, new stack is checked for enough space first,
1352 * then switched. According to AMD, the stack is switched first and
1353 * then pushes might fault!
1354 */
1355
1356 /** @todo: According to AMD, CS is loaded first, then SS.
1357 * According to Intel, it's the other way around!?
1358 */
1359
1360 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1361
1362 /* Set the accessed bit before committing new SS. */
1363 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1364 {
1365 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
1366 if (rcStrict != VINF_SUCCESS)
1367 return rcStrict;
1368 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1369 }
1370
1371 /* Remember the old SS:rSP and their linear address. */
1372 uOldSS = pCtx->ss.Sel;
1373 uOldRsp = pCtx->rsp;
1374
1375 GCPtrParmWds = pCtx->ss.u64Base + pCtx->rsp;
1376
1377 /* Commit new SS:rSP. */
1378 pCtx->ss.Sel = uNewSS;
1379 pCtx->ss.ValidSel = uNewSS;
1380 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1381 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1382 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1383 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1384 pCtx->rsp = uNewRsp;
1385 pIemCpu->uCpl = uNewCSDpl;
1386 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
1387 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
1388
1389 /* Check new stack - may #SS(NewSS). */
1390 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbNewStack,
1391 &uPtrRet.pv, &uNewRsp);
1392 if (rcStrict != VINF_SUCCESS)
1393 {
1394 Log(("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1395 return rcStrict;
1396 }
1397
1398 if (!IEM_IS_LONG_MODE(pIemCpu))
1399 {
1400 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1401 {
1402 /* Push the old CS:rIP. */
1403 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1404 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1405
1406 /* Map the relevant chunk of the old stack. */
1407 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1408 if (rcStrict != VINF_SUCCESS)
1409 {
1410 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1411 return rcStrict;
1412 }
1413
1414 /* Copy the parameter (d)words. */
1415 for (int i = 0; i < cbWords; ++i)
1416 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1417
1418 /* Unmap the old stack. */
1419 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1420 if (rcStrict != VINF_SUCCESS)
1421 {
1422 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1423 return rcStrict;
1424 }
1425
1426 /* Push the old SS:rSP. */
1427 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1428 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1429 }
1430 else
1431 {
1432 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1433
1434 /* Push the old CS:rIP. */
1435 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1436 uPtrRet.pu16[1] = pCtx->cs.Sel;
1437
1438 /* Map the relevant chunk of the old stack. */
1439 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1440 if (rcStrict != VINF_SUCCESS)
1441 {
1442 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1443 return rcStrict;
1444 }
1445
1446 /* Copy the parameter words. */
1447 for (int i = 0; i < cbWords; ++i)
1448 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1449
1450 /* Unmap the old stack. */
1451 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1452 if (rcStrict != VINF_SUCCESS)
1453 {
1454 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1455 return rcStrict;
1456 }
1457
1458 /* Push the old SS:rSP. */
1459 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1460 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1461 }
1462 }
1463 else
1464 {
1465 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1466
1467 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1468 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1469 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1470 uPtrRet.pu64[2] = uOldRsp;
1471 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1472 }
1473
1474 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Chop the high bits off if 16-bit gate (Intel says so). */
1482 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1483 uNewRip = (uint16_t)uNewRip;
1484
1485 /* Limit / canonical check. */
1486 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1487 if (!IEM_IS_LONG_MODE(pIemCpu))
1488 {
1489 if (uNewRip > cbLimit)
1490 {
1491 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1492 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1493 }
1494 u64Base = X86DESC_BASE(&DescCS.Legacy);
1495 }
1496 else
1497 {
1498 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1499 if (!IEM_IS_CANONICAL(uNewRip))
1500 {
1501 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1502 return iemRaiseNotCanonical(pIemCpu);
1503 }
1504 u64Base = 0;
1505 }
1506
1507 /*
1508 * Now set the accessed bit before committing the result into CS, CSHID
1509 * and RIP.
1511 */
1512 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1513 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1514 {
1515 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1516 if (rcStrict != VINF_SUCCESS)
1517 return rcStrict;
1518 /** @todo check what VT-x and AMD-V does. */
1519 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1520 }
1521
1522 /* Commit new CS:rIP. */
1523 pCtx->rip = uNewRip;
1524 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1525 pCtx->cs.Sel |= pIemCpu->uCpl;
1526 pCtx->cs.ValidSel = pCtx->cs.Sel;
1527 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1528 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1529 pCtx->cs.u32Limit = cbLimit;
1530 pCtx->cs.u64Base = u64Base;
1531 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1532 }
1533 else
1534 {
1535 /* Same privilege. */
1536 /** @todo: This is very similar to regular far calls; merge! */
1537
1538 /* Check stack first - may #SS(0). */
1539 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1540 * 16-bit code cause a two or four byte CS to be pushed? */
1541 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1542 IEM_IS_LONG_MODE(pIemCpu) ? 8+8
1543 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1544 &uPtrRet.pv, &uNewRsp);
1545 if (rcStrict != VINF_SUCCESS)
1546 return rcStrict;
1547
1548 /* Chop the high bits off if 16-bit gate (Intel says so). */
1549 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1550 uNewRip = (uint16_t)uNewRip;
1551
1552 /* Limit / canonical check. */
1553 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1554 if (!IEM_IS_LONG_MODE(pIemCpu))
1555 {
1556 if (uNewRip > cbLimit)
1557 {
1558 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1559 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1560 }
1561 u64Base = X86DESC_BASE(&DescCS.Legacy);
1562 }
1563 else
1564 {
1565 if (!IEM_IS_CANONICAL(uNewRip))
1566 {
1567 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1568 return iemRaiseNotCanonical(pIemCpu);
1569 }
1570 u64Base = 0;
1571 }
1572
1573 /*
1574 * Now set the accessed bit before
1575 * writing the return address to the stack and committing the result into
1576 * CS, CSHID and RIP.
1577 */
1578 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1579 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1580 {
1581 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1582 if (rcStrict != VINF_SUCCESS)
1583 return rcStrict;
1584 /** @todo check what VT-x and AMD-V does. */
1585 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1586 }
1587
1588 /* stack */
1589 if (!IEM_IS_LONG_MODE(pIemCpu))
1590 {
1591 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1592 {
1593 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1594 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1595 }
1596 else
1597 {
1598 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1599 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1600 uPtrRet.pu16[1] = pCtx->cs.Sel;
1601 }
1602 }
1603 else
1604 {
1605 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1606 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1607 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1608 }
1609
1610 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1611 if (rcStrict != VINF_SUCCESS)
1612 return rcStrict;
1613
1614 /* commit */
1615 pCtx->rip = uNewRip;
1616 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1617 pCtx->cs.Sel |= pIemCpu->uCpl;
1618 pCtx->cs.ValidSel = pCtx->cs.Sel;
1619 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1620 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1621 pCtx->cs.u32Limit = cbLimit;
1622 pCtx->cs.u64Base = u64Base;
1623 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1624 }
1625 }
1626 pCtx->eflags.Bits.u1RF = 0;
1627 return VINF_SUCCESS;
1628#endif
1629}
1630
1631
1632/**
1633 * Implements far jumps and calls thru system selectors.
1634 *
1635 * @param uSel The selector.
1636 * @param enmBranch The kind of branching we're performing.
1637 * @param enmEffOpSize The effective operand size.
1638 * @param pDesc The descriptor corresponding to @a uSel.
1639 */
1640IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1641{
1642 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1643 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1644
1645 if (IEM_IS_LONG_MODE(pIemCpu))
1646 switch (pDesc->Legacy.Gen.u4Type)
1647 {
1648 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1649 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1650
1651 default:
1652 case AMD64_SEL_TYPE_SYS_LDT:
1653 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1654 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1656 case AMD64_SEL_TYPE_SYS_INT_GATE:
1657 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1658 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1659 }
1660
1661 switch (pDesc->Legacy.Gen.u4Type)
1662 {
1663 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1664 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1665 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1666
1667 case X86_SEL_TYPE_SYS_TASK_GATE:
1668 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1669
1670 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1671 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1672 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1673
1674 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1675 Log(("branch %04x -> busy 286 TSS\n", uSel));
1676 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1677
1678 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1679 Log(("branch %04x -> busy 386 TSS\n", uSel));
1680 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1681
1682 default:
1683 case X86_SEL_TYPE_SYS_LDT:
1684 case X86_SEL_TYPE_SYS_286_INT_GATE:
1685 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1686 case X86_SEL_TYPE_SYS_386_INT_GATE:
1687 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1688 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1689 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1690 }
1691}
1692
1693
1694/**
1695 * Implements far jumps.
1696 *
1697 * @param uSel The selector.
1698 * @param offSeg The segment offset.
1699 * @param enmEffOpSize The effective operand size.
1700 */
1701IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1702{
1703 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1704 NOREF(cbInstr);
1705 Assert(offSeg <= UINT32_MAX);
1706
1707 /*
1708 * Real mode and V8086 mode are easy. The only snag seems to be that
1709 * CS.limit doesn't change and the limit check is done against the current
1710 * limit.
1711 */
1712 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1713 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1714 {
1715 if (offSeg > pCtx->cs.u32Limit)
1716 return iemRaiseGeneralProtectionFault0(pIemCpu);
1717
1718 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1719 pCtx->rip = offSeg;
1720 else
1721 pCtx->rip = offSeg & UINT16_MAX;
1722 pCtx->cs.Sel = uSel;
1723 pCtx->cs.ValidSel = uSel;
1724 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1725 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1726 pCtx->eflags.Bits.u1RF = 0;
1727 return VINF_SUCCESS;
1728 }
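/* Illustration: in real mode the new CS base is simply the selector shifted
   left by four, so e.g. 'jmp far 1234h:0010h' ends up with CS.base=12340h and
   execution continuing at linear 12350h, while CS.limit and the cached
   attributes stay whatever they were before. */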
1729
1730 /*
1731 * Protected mode. Need to parse the specified descriptor...
1732 */
1733 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1734 {
1735 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1736 return iemRaiseGeneralProtectionFault0(pIemCpu);
1737 }
1738
1739 /* Fetch the descriptor. */
1740 IEMSELDESC Desc;
1741 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1742 if (rcStrict != VINF_SUCCESS)
1743 return rcStrict;
1744
1745 /* Is it there? */
1746 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1747 {
1748 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1749 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1750 }
1751
1752 /*
1753 * Deal with it according to its type. We do the standard code selectors
1754 * here and dispatch the system selectors to worker functions.
1755 */
1756 if (!Desc.Legacy.Gen.u1DescType)
1757 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1758
1759 /* Only code segments. */
1760 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1761 {
1762 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1763 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1764 }
1765
1766 /* L vs D. */
1767 if ( Desc.Legacy.Gen.u1Long
1768 && Desc.Legacy.Gen.u1DefBig
1769 && IEM_IS_LONG_MODE(pIemCpu))
1770 {
1771 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1772 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1773 }
1774
1775 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1776 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1777 {
1778 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1779 {
1780 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1781 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1782 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1783 }
1784 }
1785 else
1786 {
1787 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1788 {
1789 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1790 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1791 }
1792 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1793 {
1794 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1795 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1796 }
1797 }
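/* Summary of the checks above: a conforming target only requires DPL <= CPL
   (RPL is ignored), a non-conforming one requires DPL == CPL and RPL <= CPL;
   either way CPL is left unchanged and stored into CS.RPL by the commit
   further down. */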
1798
1799 /* Chop the high bits if 16-bit (Intel says so). */
1800 if (enmEffOpSize == IEMMODE_16BIT)
1801 offSeg &= UINT16_MAX;
1802
1803 /* Limit check. (Should alternatively check for non-canonical addresses
1804 here, but that is ruled out by offSeg being 32-bit, right?) */
1805 uint64_t u64Base;
1806 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1807 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1808 u64Base = 0;
1809 else
1810 {
1811 if (offSeg > cbLimit)
1812 {
1813 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1814 /** @todo: Intel says this is #GP(0)! */
1815 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1816 }
1817 u64Base = X86DESC_BASE(&Desc.Legacy);
1818 }
1819
1820 /*
1821 * Ok, everything checked out fine. Now set the accessed bit before
1822 * committing the result into CS, CSHID and RIP.
1823 */
1824 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1825 {
1826 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1827 if (rcStrict != VINF_SUCCESS)
1828 return rcStrict;
1829 /** @todo check what VT-x and AMD-V does. */
1830 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1831 }
1832
1833 /* commit */
1834 pCtx->rip = offSeg;
1835 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1836 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1837 pCtx->cs.ValidSel = pCtx->cs.Sel;
1838 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1839 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1840 pCtx->cs.u32Limit = cbLimit;
1841 pCtx->cs.u64Base = u64Base;
1842 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1843 pCtx->eflags.Bits.u1RF = 0;
1844 /** @todo check if the hidden bits are loaded correctly for 64-bit
1845 * mode. */
1846 return VINF_SUCCESS;
1847}
1848
1849
1850/**
1851 * Implements far calls.
1852 *
1853 * This is very similar to iemCImpl_FarJmp.
1854 *
1855 * @param uSel The selector.
1856 * @param offSeg The segment offset.
1857 * @param enmEffOpSize The operand size (in case we need it).
1858 */
1859IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1860{
1861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1862 VBOXSTRICTRC rcStrict;
1863 uint64_t uNewRsp;
1864 RTPTRUNION uPtrRet;
1865
1866 /*
1867 * Real mode and V8086 mode are easy. The only snag seems to be that
1868 * CS.limit doesn't change and the limit check is done against the current
1869 * limit.
1870 */
1871 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1872 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1873 {
1874 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1875
1876 /* Check stack first - may #SS(0). */
1877 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1878 &uPtrRet.pv, &uNewRsp);
1879 if (rcStrict != VINF_SUCCESS)
1880 return rcStrict;
1881
1882 /* Check the target address range. */
1883 if (offSeg > UINT32_MAX)
1884 return iemRaiseGeneralProtectionFault0(pIemCpu);
1885
1886 /* Everything is fine, push the return address. */
1887 if (enmEffOpSize == IEMMODE_16BIT)
1888 {
1889 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1890 uPtrRet.pu16[1] = pCtx->cs.Sel;
1891 }
1892 else
1893 {
1894 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1895 uPtrRet.pu16[3] = pCtx->cs.Sel;
1896 }
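/* For example, a 16-bit 'call far 1234h:0010h' leaves the frame
   [new SP+0] = IP of the next instruction, [new SP+2] = old CS, before the
   branch is taken below; the 32-bit variant stores the 32-bit return EIP
   first instead. */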
1897 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1898 if (rcStrict != VINF_SUCCESS)
1899 return rcStrict;
1900
1901 /* Branch. */
1902 pCtx->rip = offSeg;
1903 pCtx->cs.Sel = uSel;
1904 pCtx->cs.ValidSel = uSel;
1905 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1906 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1907 pCtx->eflags.Bits.u1RF = 0;
1908 return VINF_SUCCESS;
1909 }
1910
1911 /*
1912 * Protected mode. Need to parse the specified descriptor...
1913 */
1914 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1915 {
1916 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1917 return iemRaiseGeneralProtectionFault0(pIemCpu);
1918 }
1919
1920 /* Fetch the descriptor. */
1921 IEMSELDESC Desc;
1922 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925
1926 /*
1927 * Deal with it according to its type. We do the standard code selectors
1928 * here and dispatch the system selectors to worker functions.
1929 */
1930 if (!Desc.Legacy.Gen.u1DescType)
1931 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1932
1933 /* Only code segments. */
1934 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1935 {
1936 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1937 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1938 }
1939
1940 /* L vs D. */
1941 if ( Desc.Legacy.Gen.u1Long
1942 && Desc.Legacy.Gen.u1DefBig
1943 && IEM_IS_LONG_MODE(pIemCpu))
1944 {
1945 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1946 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1947 }
1948
1949 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1950 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1951 {
1952 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1953 {
1954 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1955 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1956 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1957 }
1958 }
1959 else
1960 {
1961 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1962 {
1963 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1965 }
1966 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1967 {
1968 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1969 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1970 }
1971 }
1972
1973 /* Is it there? */
1974 if (!Desc.Legacy.Gen.u1Present)
1975 {
1976 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1977 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1978 }
1979
1980 /* Check stack first - may #SS(0). */
1981 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1982 * 16-bit code cause a two or four byte CS to be pushed? */
1983 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1984 enmEffOpSize == IEMMODE_64BIT ? 8+8
1985 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1986 &uPtrRet.pv, &uNewRsp);
1987 if (rcStrict != VINF_SUCCESS)
1988 return rcStrict;
1989
1990 /* Chop the high bits if 16-bit (Intel says so). */
1991 if (enmEffOpSize == IEMMODE_16BIT)
1992 offSeg &= UINT16_MAX;
1993
1994 /* Limit / canonical check. */
1995 uint64_t u64Base;
1996 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1997 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1998 {
1999 if (!IEM_IS_CANONICAL(offSeg))
2000 {
2001 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2002 return iemRaiseNotCanonical(pIemCpu);
2003 }
2004 u64Base = 0;
2005 }
2006 else
2007 {
2008 if (offSeg > cbLimit)
2009 {
2010 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2011 /** @todo: Intel says this is #GP(0)! */
2012 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2013 }
2014 u64Base = X86DESC_BASE(&Desc.Legacy);
2015 }
2016
2017 /*
2018 * Now set the accessed bit before
2019 * writing the return address to the stack and committing the result into
2020 * CS, CSHID and RIP.
2021 */
2022 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2023 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2024 {
2025 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2026 if (rcStrict != VINF_SUCCESS)
2027 return rcStrict;
2028 /** @todo check what VT-x and AMD-V does. */
2029 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2030 }
2031
2032 /* stack */
2033 if (enmEffOpSize == IEMMODE_16BIT)
2034 {
2035 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2036 uPtrRet.pu16[1] = pCtx->cs.Sel;
2037 }
2038 else if (enmEffOpSize == IEMMODE_32BIT)
2039 {
2040 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2041 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2042 }
2043 else
2044 {
2045 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2046 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2047 }
2048 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
2049 if (rcStrict != VINF_SUCCESS)
2050 return rcStrict;
2051
2052 /* commit */
2053 pCtx->rip = offSeg;
2054 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2055 pCtx->cs.Sel |= pIemCpu->uCpl;
2056 pCtx->cs.ValidSel = pCtx->cs.Sel;
2057 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2058 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2059 pCtx->cs.u32Limit = cbLimit;
2060 pCtx->cs.u64Base = u64Base;
2061 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2062 pCtx->eflags.Bits.u1RF = 0;
2063 /** @todo check if the hidden bits are loaded correctly for 64-bit
2064 * mode. */
2065 return VINF_SUCCESS;
2066}
2067
2068
2069/**
2070 * Implements retf.
2071 *
2072 * @param enmEffOpSize The effective operand size.
2073 * @param cbPop The number of bytes of arguments to pop from
2074 * the stack.
2075 */
2076IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2077{
2078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2079 VBOXSTRICTRC rcStrict;
2080 RTCPTRUNION uPtrFrame;
2081 uint64_t uNewRsp;
2082 uint64_t uNewRip;
2083 uint16_t uNewCs;
2084 NOREF(cbInstr);
2085
2086 /*
2087 * Read the stack values first.
2088 */
2089 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2090 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2091 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2092 if (rcStrict != VINF_SUCCESS)
2093 return rcStrict;
2094 if (enmEffOpSize == IEMMODE_16BIT)
2095 {
2096 uNewRip = uPtrFrame.pu16[0];
2097 uNewCs = uPtrFrame.pu16[1];
2098 }
2099 else if (enmEffOpSize == IEMMODE_32BIT)
2100 {
2101 uNewRip = uPtrFrame.pu32[0];
2102 uNewCs = uPtrFrame.pu16[2];
2103 }
2104 else
2105 {
2106 uNewRip = uPtrFrame.pu64[0];
2107 uNewCs = uPtrFrame.pu16[4];
2108 }
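/* So the frame read above is, from the current SP: 16-bit: IP, CS;
   32-bit: EIP, then CS in the low word of a dword slot;
   64-bit: RIP, then CS in the low word of a qword slot. */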
2109
2110 /*
2111 * Real mode and V8086 mode are easy.
2112 */
2113 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2114 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2115 {
2116 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2117 /** @todo check how this is supposed to work if sp=0xfffe. */
2118
2119 /* Check the limit of the new EIP. */
2120 /** @todo Intel pseudo code only does the limit check for 16-bit
2121 * operands, AMD does not make any distinction. What is right? */
2122 if (uNewRip > pCtx->cs.u32Limit)
2123 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2124
2125 /* commit the operation. */
2126 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2127 if (rcStrict != VINF_SUCCESS)
2128 return rcStrict;
2129 pCtx->rip = uNewRip;
2130 pCtx->cs.Sel = uNewCs;
2131 pCtx->cs.ValidSel = uNewCs;
2132 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2133 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2134 pCtx->eflags.Bits.u1RF = 0;
2135 /** @todo do we load attribs and limit as well? */
2136 if (cbPop)
2137 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2138 return VINF_SUCCESS;
2139 }
2140
2141 /*
2142 * Protected mode is complicated, of course.
2143 */
2144 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2145 {
2146 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2147 return iemRaiseGeneralProtectionFault0(pIemCpu);
2148 }
2149
2150 /* Fetch the descriptor. */
2151 IEMSELDESC DescCs;
2152 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs, X86_XCPT_GP);
2153 if (rcStrict != VINF_SUCCESS)
2154 return rcStrict;
2155
2156 /* Can only return to a code selector. */
2157 if ( !DescCs.Legacy.Gen.u1DescType
2158 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2159 {
2160 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2161 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2162 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2163 }
2164
2165 /* L vs D. */
2166 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2167 && DescCs.Legacy.Gen.u1DefBig
2168 && IEM_IS_LONG_MODE(pIemCpu))
2169 {
2170 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2171 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2172 }
2173
2174 /* DPL/RPL/CPL checks. */
2175 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2176 {
2177 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
2178 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2179 }
2180
2181 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2182 {
2183 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2184 {
2185 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2186 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2187 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2188 }
2189 }
2190 else
2191 {
2192 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2193 {
2194 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2195 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2196 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2197 }
2198 }
2199
2200 /* Is it there? */
2201 if (!DescCs.Legacy.Gen.u1Present)
2202 {
2203 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2204 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2205 }
2206
2207 /*
2208 * Return to outer privilege? (We'll typically have entered via a call gate.)
2209 */
2210 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2211 {
2212 /* Read the outer stack pointer stored *after* the parameters. */
2213 RTCPTRUNION uPtrStack;
2214 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
2215 if (rcStrict != VINF_SUCCESS)
2216 return rcStrict;
2217
2218 uPtrStack.pu8 += cbPop; /* Skip the parameters. */
2219
2220 uint16_t uNewOuterSs;
2221 uint64_t uNewOuterRsp;
2222 if (enmEffOpSize == IEMMODE_16BIT)
2223 {
2224 uNewOuterRsp = uPtrStack.pu16[0];
2225 uNewOuterSs = uPtrStack.pu16[1];
2226 }
2227 else if (enmEffOpSize == IEMMODE_32BIT)
2228 {
2229 uNewOuterRsp = uPtrStack.pu32[0];
2230 uNewOuterSs = uPtrStack.pu16[2];
2231 }
2232 else
2233 {
2234 uNewOuterRsp = uPtrStack.pu64[0];
2235 uNewOuterSs = uPtrStack.pu16[4];
2236 }
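/* Stack picture for the outer return: immediately above the CS:IP frame read
   earlier come cbPop bytes of callee parameters, followed by the outer SP and
   SS in slots of the same operand size (SS again in the low word). */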
2237
2238 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2239 and fetch its descriptor. */
2240 IEMSELDESC DescSs;
2241 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2242 {
2243 if ( !DescCs.Legacy.Gen.u1Long
2244 || (uNewOuterSs & X86_SEL_RPL) == 3)
2245 {
2246 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2247 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2248 return iemRaiseGeneralProtectionFault0(pIemCpu);
2249 }
2250 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2251 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2252 }
2253 else
2254 {
2255 /* Fetch the descriptor for the new stack segment. */
2256 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2257 if (rcStrict != VINF_SUCCESS)
2258 return rcStrict;
2259 }
2260
2261 /* Check that RPL of stack and code selectors match. */
2262 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2263 {
2264 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2265 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2266 }
2267
2268 /* Must be a writable data segment. */
2269 if ( !DescSs.Legacy.Gen.u1DescType
2270 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2271 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2272 {
2273 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2274 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2275 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2276 }
2277
2278 /* L vs D. (Not mentioned by intel.) */
2279 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2280 && DescSs.Legacy.Gen.u1DefBig
2281 && IEM_IS_LONG_MODE(pIemCpu))
2282 {
2283 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2284 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2285 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2286 }
2287
2288 /* DPL/RPL/CPL checks. */
2289 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2290 {
2291 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2292 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2293 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2294 }
2295
2296 /* Is it there? */
2297 if (!DescSs.Legacy.Gen.u1Present)
2298 {
2299 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2300 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2301 }
2302
2303 /* Calc SS limit. */
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2305
2306 /* Is RIP canonical or within CS.limit? */
2307 uint64_t u64Base;
2308 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2309
2310 /** @todo Testcase: Is this correct? */
2311 if ( DescCs.Legacy.Gen.u1Long
2312 && IEM_IS_LONG_MODE(pIemCpu) )
2313 {
2314 if (!IEM_IS_CANONICAL(uNewRip))
2315 {
2316 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2317 return iemRaiseNotCanonical(pIemCpu);
2318 }
2319 u64Base = 0;
2320 }
2321 else
2322 {
2323 if (uNewRip > cbLimitCs)
2324 {
2325 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2326 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2327 /** @todo: Intel says this is #GP(0)! */
2328 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2329 }
2330 u64Base = X86DESC_BASE(&DescCs.Legacy);
2331 }
2332
2333 /*
2334 * Now set the accessed bit before
2335 * committing the result into
2336 * CS, CSHID and RIP.
2337 */
2338 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2339 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2340 {
2341 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2342 if (rcStrict != VINF_SUCCESS)
2343 return rcStrict;
2344 /** @todo check what VT-x and AMD-V does. */
2345 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2346 }
2347 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2348 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2349 {
2350 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
2351 if (rcStrict != VINF_SUCCESS)
2352 return rcStrict;
2353 /** @todo check what VT-x and AMD-V does. */
2354 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2355 }
2356
2357 /* commit */
2358 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2359 if (rcStrict != VINF_SUCCESS)
2360 return rcStrict;
2361 if (enmEffOpSize == IEMMODE_16BIT)
2362 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2363 else
2364 pCtx->rip = uNewRip;
2365 pCtx->cs.Sel = uNewCs;
2366 pCtx->cs.ValidSel = uNewCs;
2367 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2368 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2369 pCtx->cs.u32Limit = cbLimitCs;
2370 pCtx->cs.u64Base = u64Base;
2371 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2372 pCtx->rsp = uNewOuterRsp;
2373 pCtx->ss.Sel = uNewOuterSs;
2374 pCtx->ss.ValidSel = uNewOuterSs;
2375 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2376 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2377 pCtx->ss.u32Limit = cbLimitSs;
2378 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2379 pCtx->ss.u64Base = 0;
2380 else
2381 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2382
2383 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
2384 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2385 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2386 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2387 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2388
2389 /** @todo check if the hidden bits are loaded correctly for 64-bit
2390 * mode. */
2391
2392 if (cbPop)
2393 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2394 pCtx->eflags.Bits.u1RF = 0;
2395
2396 /* Done! */
2397 }
2398 /*
2399 * Return to the same privilege level
2400 */
2401 else
2402 {
2403 /* Limit / canonical check. */
2404 uint64_t u64Base;
2405 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2406
2407 /** @todo Testcase: Is this correct? */
2408 if ( DescCs.Legacy.Gen.u1Long
2409 && IEM_IS_LONG_MODE(pIemCpu) )
2410 {
2411 if (!IEM_IS_CANONICAL(uNewRip))
2412 {
2413 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2414 return iemRaiseNotCanonical(pIemCpu);
2415 }
2416 u64Base = 0;
2417 }
2418 else
2419 {
2420 if (uNewRip > cbLimitCs)
2421 {
2422 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2423 /** @todo: Intel says this is #GP(0)! */
2424 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2425 }
2426 u64Base = X86DESC_BASE(&DescCs.Legacy);
2427 }
2428
2429 /*
2430 * Now set the accessed bit before
2431 * committing the result into
2432 * CS, CSHID and RIP.
2433 */
2434 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2435 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2436 {
2437 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2438 if (rcStrict != VINF_SUCCESS)
2439 return rcStrict;
2440 /** @todo check what VT-x and AMD-V does. */
2441 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2442 }
2443
2444 /* commit */
2445 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2446 if (rcStrict != VINF_SUCCESS)
2447 return rcStrict;
2448 if (enmEffOpSize == IEMMODE_16BIT)
2449 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2450 else
2451 pCtx->rip = uNewRip;
2452 pCtx->cs.Sel = uNewCs;
2453 pCtx->cs.ValidSel = uNewCs;
2454 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2455 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2456 pCtx->cs.u32Limit = cbLimitCs;
2457 pCtx->cs.u64Base = u64Base;
2458 /** @todo check if the hidden bits are loaded correctly for 64-bit
2459 * mode. */
2460 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2461 if (cbPop)
2462 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2463 pCtx->eflags.Bits.u1RF = 0;
2464 }
2465 return VINF_SUCCESS;
2466}
2467
2468
2469/**
2470 * Implements retn.
2471 *
2472 * We're doing this in C because of the \#GP that might be raised if the popped
2473 * program counter is out of bounds.
2474 *
2475 * @param enmEffOpSize The effective operand size.
2476 * @param cbPop The number of bytes of arguments to pop from
2477 * the stack.
2478 */
2479IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2480{
2481 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2482 NOREF(cbInstr);
2483
2484 /* Fetch the RSP from the stack. */
2485 VBOXSTRICTRC rcStrict;
2486 RTUINT64U NewRip;
2487 RTUINT64U NewRsp;
2488 NewRsp.u = pCtx->rsp;
2489 switch (enmEffOpSize)
2490 {
2491 case IEMMODE_16BIT:
2492 NewRip.u = 0;
2493 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
2494 break;
2495 case IEMMODE_32BIT:
2496 NewRip.u = 0;
2497 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
2498 break;
2499 case IEMMODE_64BIT:
2500 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
2501 break;
2502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2503 }
2504 if (rcStrict != VINF_SUCCESS)
2505 return rcStrict;
2506
2507 /* Check the new RSP before loading it. */
2508 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2509 * of it. The canonical test is performed here and for call. */
2510 if (enmEffOpSize != IEMMODE_64BIT)
2511 {
2512 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2513 {
2514 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2515 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2516 }
2517 }
2518 else
2519 {
2520 if (!IEM_IS_CANONICAL(NewRip.u))
2521 {
2522 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2523 return iemRaiseNotCanonical(pIemCpu);
2524 }
2525 }
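/* E.g. a 32-bit 'ret' that pops EIP=00401000h is only accepted if that value
   is within CS.limit, whereas the 64-bit form skips the limit check and just
   requires the popped RIP to be canonical. */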
2526
2527 /* Commit it. */
2528 pCtx->rip = NewRip.u;
2529 pCtx->rsp = NewRsp.u;
2530 if (cbPop)
2531 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2532 pCtx->eflags.Bits.u1RF = 0;
2533
2534 return VINF_SUCCESS;
2535}
2536
2537
2538/**
2539 * Implements enter.
2540 *
2541 * We're doing this in C because the instruction is insane; even for the
2542 * u8NestingLevel=0 case, dealing with the stack is tedious.
2543 *
2544 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate, in bytes.
 * @param cParameters The nesting level; only the low 5 bits are used.
2545 */
2546IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2547{
2548 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
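/* Roughly, 'enter cbFrame, 0' behaves like: push rBP; rBP = rSP;
   rSP -= cbFrame (each step truncated according to the operand and stack
   sizes); non-zero nesting levels additionally copy frame pointers from the
   previous frame in the loop further down. */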
2549
2550 /* Push RBP, saving the old value in TmpRbp. */
2551 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2552 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2553 RTUINT64U NewRbp;
2554 VBOXSTRICTRC rcStrict;
2555 if (enmEffOpSize == IEMMODE_64BIT)
2556 {
2557 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
2558 NewRbp = NewRsp;
2559 }
2560 else if (enmEffOpSize == IEMMODE_32BIT)
2561 {
2562 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
2563 NewRbp = NewRsp;
2564 }
2565 else
2566 {
2567 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
2568 NewRbp = TmpRbp;
2569 NewRbp.Words.w0 = NewRsp.Words.w0;
2570 }
2571 if (rcStrict != VINF_SUCCESS)
2572 return rcStrict;
2573
2574 /* Copy the parameters (aka nesting levels by Intel). */
2575 cParameters &= 0x1f;
2576 if (cParameters > 0)
2577 {
2578 switch (enmEffOpSize)
2579 {
2580 case IEMMODE_16BIT:
2581 if (pCtx->ss.Attr.n.u1DefBig)
2582 TmpRbp.DWords.dw0 -= 2;
2583 else
2584 TmpRbp.Words.w0 -= 2;
2585 do
2586 {
2587 uint16_t u16Tmp;
2588 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
2589 if (rcStrict != VINF_SUCCESS)
2590 break;
2591 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
2592 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2593 break;
2594
2595 case IEMMODE_32BIT:
2596 if (pCtx->ss.Attr.n.u1DefBig)
2597 TmpRbp.DWords.dw0 -= 4;
2598 else
2599 TmpRbp.Words.w0 -= 4;
2600 do
2601 {
2602 uint32_t u32Tmp;
2603 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
2604 if (rcStrict != VINF_SUCCESS)
2605 break;
2606 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
2607 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2608 break;
2609
2610 case IEMMODE_64BIT:
2611 TmpRbp.u -= 8;
2612 do
2613 {
2614 uint64_t u64Tmp;
2615 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
2616 if (rcStrict != VINF_SUCCESS)
2617 break;
2618 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
2619 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2620 break;
2621
2622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2623 }
2624 if (rcStrict != VINF_SUCCESS)
2625 return rcStrict;
2626
2627 /* Push the new RBP */
2628 if (enmEffOpSize == IEMMODE_64BIT)
2629 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
2630 else if (enmEffOpSize == IEMMODE_32BIT)
2631 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
2632 else
2633 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
2634 if (rcStrict != VINF_SUCCESS)
2635 return rcStrict;
2636
2637 }
2638
2639 /* Recalc RSP. */
2640 iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame);
2641
2642 /** @todo Should probe write access at the new RSP according to AMD. */
2643
2644 /* Commit it. */
2645 pCtx->rbp = NewRbp.u;
2646 pCtx->rsp = NewRsp.u;
2647 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2648
2649 return VINF_SUCCESS;
2650}
2651
2652
2653
2654/**
2655 * Implements leave.
2656 *
2657 * We're doing this in C because messing with the stack registers is annoying
2658 * since they depend on SS attributes.
2659 *
2660 * @param enmEffOpSize The effective operand size.
2661 */
2662IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2663{
2664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2665
2666 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2667 RTUINT64U NewRsp;
2668 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2669 NewRsp.u = pCtx->rbp;
2670 else if (pCtx->ss.Attr.n.u1DefBig)
2671 NewRsp.u = pCtx->ebp;
2672 else
2673 {
2674 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2675 NewRsp.u = pCtx->rsp;
2676 NewRsp.Words.w0 = pCtx->bp;
2677 }
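/* In other words, LEAVE is essentially: rSP = rBP (within SS, honouring the
   D/B bit and 64-bit mode), followed by popping rBP at the operand size. */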
2678
2679 /* Pop RBP according to the operand size. */
2680 VBOXSTRICTRC rcStrict;
2681 RTUINT64U NewRbp;
2682 switch (enmEffOpSize)
2683 {
2684 case IEMMODE_16BIT:
2685 NewRbp.u = pCtx->rbp;
2686 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
2687 break;
2688 case IEMMODE_32BIT:
2689 NewRbp.u = 0;
2690 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
2691 break;
2692 case IEMMODE_64BIT:
2693 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
2694 break;
2695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2696 }
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699
2700
2701 /* Commit it. */
2702 pCtx->rbp = NewRbp.u;
2703 pCtx->rsp = NewRsp.u;
2704 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2705
2706 return VINF_SUCCESS;
2707}
2708
2709
2710/**
2711 * Implements int3 and int XX.
2712 *
2713 * @param u8Int The interrupt vector number.
2714 * @param fIsBpInstr Is it the breakpoint instruction.
2715 */
2716IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
2717{
2718 Assert(pIemCpu->cXcptRecursions == 0);
2719 return iemRaiseXcptOrInt(pIemCpu,
2720 cbInstr,
2721 u8Int,
2722 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
2723 0,
2724 0);
2725}
2726
2727
2728/**
2729 * Implements iret for real mode and V8086 mode.
2730 *
2731 * @param enmEffOpSize The effective operand size.
2732 */
2733IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2734{
2735 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2736 X86EFLAGS Efl;
2737 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2738 NOREF(cbInstr);
2739
2740 /*
2741 * In V8086 mode with IOPL != 3, iret raises #GP(0) unless VME is enabled.
2742 */
2743 if ( Efl.Bits.u1VM
2744 && Efl.Bits.u2IOPL != 3
2745 && !(pCtx->cr4 & X86_CR4_VME))
2746 return iemRaiseGeneralProtectionFault0(pIemCpu);
2747
2748 /*
2749 * Do the stack bits, but don't commit RSP before everything checks
2750 * out right.
2751 */
2752 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2753 VBOXSTRICTRC rcStrict;
2754 RTCPTRUNION uFrame;
2755 uint16_t uNewCs;
2756 uint32_t uNewEip;
2757 uint32_t uNewFlags;
2758 uint64_t uNewRsp;
2759 if (enmEffOpSize == IEMMODE_32BIT)
2760 {
2761 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2762 if (rcStrict != VINF_SUCCESS)
2763 return rcStrict;
2764 uNewEip = uFrame.pu32[0];
2765 if (uNewEip > UINT16_MAX)
2766 return iemRaiseGeneralProtectionFault0(pIemCpu);
2767
2768 uNewCs = (uint16_t)uFrame.pu32[1];
2769 uNewFlags = uFrame.pu32[2];
2770 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2771 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2772 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2773 | X86_EFL_ID;
2774 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
2775 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2776 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2777 }
2778 else
2779 {
2780 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2781 if (rcStrict != VINF_SUCCESS)
2782 return rcStrict;
2783 uNewEip = uFrame.pu16[0];
2784 uNewCs = uFrame.pu16[1];
2785 uNewFlags = uFrame.pu16[2];
2786 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2787 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2788 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2789 /** @todo The intel pseudo code does not indicate what happens to
2790 * reserved flags. We just ignore them. */
2791 /* Ancient CPU adjustments: See iemCImpl_popf. */
2792 if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_286)
2793 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2794 }
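/* The frame popped above is IP/EIP, CS, FLAGS/EFLAGS: 6 bytes with a 16-bit
   operand size and 12 bytes with a 32-bit one (CS sitting in the low word of
   its dword slot). */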
2795 /** @todo Check how this is supposed to work if sp=0xfffe. */
2796 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2797 uNewCs, uNewEip, uNewFlags, uNewRsp));
2798
2799 /*
2800 * Check the limit of the new EIP.
2801 */
2802 /** @todo Only the AMD pseudo code checks the limit here, what's
2803 * right? */
2804 if (uNewEip > pCtx->cs.u32Limit)
2805 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2806
2807 /*
2808 * V8086 checks and flag adjustments
2809 */
2810 if (Efl.Bits.u1VM)
2811 {
2812 if (Efl.Bits.u2IOPL == 3)
2813 {
2814 /* Preserve IOPL and clear RF. */
2815 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2816 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2817 }
2818 else if ( enmEffOpSize == IEMMODE_16BIT
2819 && ( !(uNewFlags & X86_EFL_IF)
2820 || !Efl.Bits.u1VIP )
2821 && !(uNewFlags & X86_EFL_TF) )
2822 {
2823 /* Move IF to VIF, clear RF and preserve IF and IOPL. */
2824 uNewFlags &= ~X86_EFL_VIF;
2825 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2826 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2827 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2828 }
2829 else
2830 return iemRaiseGeneralProtectionFault0(pIemCpu);
2831 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2832 }
2833
2834 /*
2835 * Commit the operation.
2836 */
2837 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2838 if (rcStrict != VINF_SUCCESS)
2839 return rcStrict;
2840#ifdef DBGFTRACE_ENABLED
2841 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2842 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2843#endif
2844
2845 pCtx->rip = uNewEip;
2846 pCtx->cs.Sel = uNewCs;
2847 pCtx->cs.ValidSel = uNewCs;
2848 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2849 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2850 /** @todo do we load attribs and limit as well? */
2851 Assert(uNewFlags & X86_EFL_1);
2852 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2853
2854 return VINF_SUCCESS;
2855}
2856
2857
2858/**
2859 * Loads a segment register when entering V8086 mode.
2860 *
2861 * @param pSReg The segment register.
2862 * @param uSeg The segment to load.
2863 */
2864static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2865{
2866 pSReg->Sel = uSeg;
2867 pSReg->ValidSel = uSeg;
2868 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2869 pSReg->u64Base = (uint32_t)uSeg << 4;
2870 pSReg->u32Limit = 0xffff;
2871 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2872 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2873 * IRET'ing to V8086. */
2874}
2875
2876
2877/**
2878 * Implements iret for protected mode returning to V8086 mode.
2879 *
2880 * @param pCtx Pointer to the CPU context.
2881 * @param uNewEip The new EIP.
2882 * @param uNewCs The new CS.
2883 * @param uNewFlags The new EFLAGS.
2884 * @param uNewRsp The RSP after the initial IRET frame.
2885 *
2886 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2887 */
2888IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2889 uint32_t, uNewFlags, uint64_t, uNewRsp)
2890{
2891 /*
2892 * Pop the V8086 specific frame bits off the stack.
2893 */
2894 VBOXSTRICTRC rcStrict;
2895 RTCPTRUNION uFrame;
2896 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2897 if (rcStrict != VINF_SUCCESS)
2898 return rcStrict;
2899 uint32_t uNewEsp = uFrame.pu32[0];
2900 uint16_t uNewSs = uFrame.pu32[1];
2901 uint16_t uNewEs = uFrame.pu32[2];
2902 uint16_t uNewDs = uFrame.pu32[3];
2903 uint16_t uNewFs = uFrame.pu32[4];
2904 uint16_t uNewGs = uFrame.pu32[5];
2905 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2906 if (rcStrict != VINF_SUCCESS)
2907 return rcStrict;
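/* When returning to V8086 mode the IRET frame continues with six more dwords
   holding ESP, SS, ES, DS, FS and GS (selectors in the low words), which is
   what was just read above. */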
2908
2909 /*
2910 * Commit the operation.
2911 */
2912 uNewFlags &= X86_EFL_LIVE_MASK;
2913 uNewFlags |= X86_EFL_RA1_MASK;
2914#ifdef DBGFTRACE_ENABLED
2915 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2916 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2917#endif
2918
2919 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2920 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2921 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2922 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2923 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2924 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2925 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2926 pCtx->rip = (uint16_t)uNewEip;
2927 pCtx->rsp = uNewEsp; /** @todo check this out! */
2928 pIemCpu->uCpl = 3;
2929
2930 return VINF_SUCCESS;
2931}
2932
2933
2934/**
2935 * Implements iret for protected mode returning via a nested task.
2936 *
2937 * @param enmEffOpSize The effective operand size.
2938 */
2939IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2940{
2941 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
2942#ifndef IEM_IMPLEMENTS_TASKSWITCH
2943 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2944#else
2945 /*
2946 * Read the segment selector in the link-field of the current TSS.
2947 */
2948 RTSEL uSelRet;
2949 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2950 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
2951 if (rcStrict != VINF_SUCCESS)
2952 return rcStrict;
2953
2954 /*
2955 * Fetch the returning task's TSS descriptor from the GDT.
2956 */
2957 if (uSelRet & X86_SEL_LDT)
2958 {
2959 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
2960 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet);
2961 }
2962
2963 IEMSELDESC TssDesc;
2964 rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelRet, X86_XCPT_GP);
2965 if (rcStrict != VINF_SUCCESS)
2966 return rcStrict;
2967
2968 if (TssDesc.Legacy.Gate.u1DescType)
2969 {
2970 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
2971 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2972 }
2973
2974 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
2975 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2976 {
2977 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
2978 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 if (!TssDesc.Legacy.Gate.u1Present)
2982 {
2983 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
2984 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2985 }
2986
2987 uint32_t uNextEip = pCtx->eip + cbInstr;
2988 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
2989 0 /* uCr2 */, uSelRet, &TssDesc);
2990#endif
2991}
2992
2993
2994/**
2995 * Implements iret for protected mode.
2996 *
2997 * @param enmEffOpSize The effective operand size.
2998 */
2999IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3000{
3001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3002 NOREF(cbInstr);
3003 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3004
3005 /*
3006 * Nested task return.
3007 */
3008 if (pCtx->eflags.Bits.u1NT)
3009 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3010
3011 /*
3012 * Normal return.
3013 *
3014 * Do the stack bits, but don't commit RSP before everything checks
3015 * out right.
3016 */
3017 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3018 VBOXSTRICTRC rcStrict;
3019 RTCPTRUNION uFrame;
3020 uint16_t uNewCs;
3021 uint32_t uNewEip;
3022 uint32_t uNewFlags;
3023 uint64_t uNewRsp;
3024 if (enmEffOpSize == IEMMODE_32BIT)
3025 {
3026 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
3027 if (rcStrict != VINF_SUCCESS)
3028 return rcStrict;
3029 uNewEip = uFrame.pu32[0];
3030 uNewCs = (uint16_t)uFrame.pu32[1];
3031 uNewFlags = uFrame.pu32[2];
3032 }
3033 else
3034 {
3035 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
3036 if (rcStrict != VINF_SUCCESS)
3037 return rcStrict;
3038 uNewEip = uFrame.pu16[0];
3039 uNewCs = uFrame.pu16[1];
3040 uNewFlags = uFrame.pu16[2];
3041 }
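/* The basic protected mode IRET frame is EIP/IP, CS, EFLAGS/FLAGS (12 or 6
   bytes); a return to an outer privilege level pops an additional ESP and SS
   pair further down. */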
3042 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3043 if (rcStrict != VINF_SUCCESS)
3044 return rcStrict;
3045 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n", uNewCs, uNewEip, uNewFlags, uNewRsp));
3046
3047 /*
3048 * We're hopefully not returning to V8086 mode...
3049 */
3050 if ( (uNewFlags & X86_EFL_VM)
3051 && pIemCpu->uCpl == 0)
3052 {
3053 Assert(enmEffOpSize == IEMMODE_32BIT);
3054 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3055 }
3056
3057 /*
3058 * Protected mode.
3059 */
3060 /* Read the CS descriptor. */
3061 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3062 {
3063 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3064 return iemRaiseGeneralProtectionFault0(pIemCpu);
3065 }
3066
3067 IEMSELDESC DescCS;
3068 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3069 if (rcStrict != VINF_SUCCESS)
3070 {
3071 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3072 return rcStrict;
3073 }
3074
3075 /* Must be a code descriptor. */
3076 if (!DescCS.Legacy.Gen.u1DescType)
3077 {
3078 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3079 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3080 }
3081 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3082 {
3083 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3084 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3085 }
3086
3087#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3088 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3089 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3090 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3091 {
3092 if ((uNewCs & X86_SEL_RPL) == 1)
3093 {
3094 if ( pIemCpu->uCpl == 0
3095 && ( !EMIsRawRing1Enabled(pVM)
3096 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3097 {
3098 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3099 uNewCs &= X86_SEL_MASK_OFF_RPL;
3100 }
3101# ifdef LOG_ENABLED
3102 else if (pIemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3103 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3104# endif
3105 }
3106 else if ( (uNewCs & X86_SEL_RPL) == 2
3107 && EMIsRawRing1Enabled(pVM)
3108 && pIemCpu->uCpl <= 1)
3109 {
3110 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3111 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3112 }
3113 }
3114#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3115
3116
3117 /* Privilege checks. */
3118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3119 {
3120 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3121 {
3122 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3123 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3124 }
3125 }
3126 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3127 {
3128 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3129 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3130 }
3131 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3132 {
3133 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
3134 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3135 }
3136
3137 /* Present? */
3138 if (!DescCS.Legacy.Gen.u1Present)
3139 {
3140 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3141 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3142 }
3143
3144 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3145
3146 /*
3147 * Return to outer level?
3148 */
3149 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
3150 {
3151 uint16_t uNewSS;
3152 uint32_t uNewESP;
3153 if (enmEffOpSize == IEMMODE_32BIT)
3154 {
3155 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
3156 if (rcStrict != VINF_SUCCESS)
3157 return rcStrict;
3158/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3159 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3160 * bit of the popped SS selector it turns out. */
3161 uNewESP = uFrame.pu32[0];
3162 uNewSS = (uint16_t)uFrame.pu32[1];
3163 }
3164 else
3165 {
3166 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
3167 if (rcStrict != VINF_SUCCESS)
3168 return rcStrict;
3169 uNewESP = uFrame.pu16[0];
3170 uNewSS = uFrame.pu16[1];
3171 }
3172 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3173 if (rcStrict != VINF_SUCCESS)
3174 return rcStrict;
3175 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3176
3177 /* Read the SS descriptor. */
3178 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3179 {
3180 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3181 return iemRaiseGeneralProtectionFault0(pIemCpu);
3182 }
3183
3184 IEMSELDESC DescSS;
3185 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3186 if (rcStrict != VINF_SUCCESS)
3187 {
3188 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3189 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3190 return rcStrict;
3191 }
3192
3193 /* Privilege checks. */
3194 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3195 {
3196 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3198 }
3199 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3200 {
3201 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3202 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3203 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3204 }
3205
3206 /* Must be a writeable data segment descriptor. */
3207 if (!DescSS.Legacy.Gen.u1DescType)
3208 {
3209 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3210 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3211 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3212 }
3213 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3214 {
3215 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3216 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3217 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3218 }
3219
3220 /* Present? */
3221 if (!DescSS.Legacy.Gen.u1Present)
3222 {
3223 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3224 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
3225 }
3226
3227 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3228
3229 /* Check EIP. */
3230 if (uNewEip > cbLimitCS)
3231 {
3232 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3233 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3234 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3235 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3236 }
3237
3238 /*
3239 * Commit the changes, marking CS and SS accessed first since
3240 * that may fail.
3241 */
3242 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3243 {
3244 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3245 if (rcStrict != VINF_SUCCESS)
3246 return rcStrict;
3247 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3248 }
3249 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3250 {
3251 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3252 if (rcStrict != VINF_SUCCESS)
3253 return rcStrict;
3254 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3255 }
3256
3257 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3258 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3259 if (enmEffOpSize != IEMMODE_16BIT)
3260 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3261 if (pIemCpu->uCpl == 0)
3262 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3263 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3264 fEFlagsMask |= X86_EFL_IF;
3265 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
3266 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3267 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3268 fEFlagsNew &= ~fEFlagsMask;
3269 fEFlagsNew |= uNewFlags & fEFlagsMask;
3270#ifdef DBGFTRACE_ENABLED
3271 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3272 pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3273 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3274#endif
3275
3276 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3277 pCtx->rip = uNewEip;
3278 pCtx->cs.Sel = uNewCs;
3279 pCtx->cs.ValidSel = uNewCs;
3280 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3281 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3282 pCtx->cs.u32Limit = cbLimitCS;
3283 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3284 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3285 if (!pCtx->ss.Attr.n.u1DefBig)
3286 pCtx->sp = (uint16_t)uNewESP;
3287 else
3288 pCtx->rsp = uNewESP;
3289 pCtx->ss.Sel = uNewSS;
3290 pCtx->ss.ValidSel = uNewSS;
3291 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3292 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3293 pCtx->ss.u32Limit = cbLimitSs;
3294 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3295
3296 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
3297 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3298 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3299 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3300 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3301
3302 /* Done! */
3303
3304 }
3305 /*
3306 * Return to the same level.
3307 */
3308 else
3309 {
3310 /* Check EIP. */
3311 if (uNewEip > cbLimitCS)
3312 {
3313 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3314 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3315 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3316 }
3317
3318 /*
3319 * Commit the changes, marking CS first since it may fail.
3320 */
3321 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3322 {
3323 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3324 if (rcStrict != VINF_SUCCESS)
3325 return rcStrict;
3326 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3327 }
3328
3329 X86EFLAGS NewEfl;
3330 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
3331 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3332 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3333 if (enmEffOpSize != IEMMODE_16BIT)
3334 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3335 if (pIemCpu->uCpl == 0)
3336 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3337 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
3338 fEFlagsMask |= X86_EFL_IF;
3339 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
3340 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3341 NewEfl.u &= ~fEFlagsMask;
3342 NewEfl.u |= fEFlagsMask & uNewFlags;
3343#ifdef DBGFTRACE_ENABLED
3344 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3345 pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,
3346 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3347#endif
3348
3349 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
3350 pCtx->rip = uNewEip;
3351 pCtx->cs.Sel = uNewCs;
3352 pCtx->cs.ValidSel = uNewCs;
3353 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3354 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3355 pCtx->cs.u32Limit = cbLimitCS;
3356 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3357 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3358 pCtx->rsp = uNewRsp;
3359 /* Done! */
3360 }
3361 return VINF_SUCCESS;
3362}
3363
3364
3365/**
3366 * Implements iret for long mode
3367 *
3368 * @param enmEffOpSize The effective operand size.
3369 */
3370IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3371{
3372 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3373 NOREF(cbInstr);
3374
3375 /*
3376 * Nested task return is not supported in long mode.
3377 */
3378 if (pCtx->eflags.Bits.u1NT)
3379 {
3380 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3381 return iemRaiseGeneralProtectionFault0(pIemCpu);
3382 }
3383
3384 /*
3385 * Normal return.
3386 *
3387 * Do the stack bits, but don't commit RSP before everything checks
3388 * out right.
3389 */
3390 VBOXSTRICTRC rcStrict;
3391 RTCPTRUNION uFrame;
3392 uint64_t uNewRip;
3393 uint16_t uNewCs;
3394 uint16_t uNewSs;
3395 uint32_t uNewFlags;
3396 uint64_t uNewRsp;
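    /* The long mode IRET frame always holds five values - RIP, CS, RFLAGS, RSP and SS -
       each slot as wide as the effective operand size. */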
3397 if (enmEffOpSize == IEMMODE_64BIT)
3398 {
3399 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402 uNewRip = uFrame.pu64[0];
3403 uNewCs = (uint16_t)uFrame.pu64[1];
3404 uNewFlags = (uint32_t)uFrame.pu64[2];
3405 uNewRsp = uFrame.pu64[3];
3406 uNewSs = (uint16_t)uFrame.pu64[4];
3407 }
3408 else if (enmEffOpSize == IEMMODE_32BIT)
3409 {
3410 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
3411 if (rcStrict != VINF_SUCCESS)
3412 return rcStrict;
3413 uNewRip = uFrame.pu32[0];
3414 uNewCs = (uint16_t)uFrame.pu32[1];
3415 uNewFlags = uFrame.pu32[2];
3416 uNewRsp = uFrame.pu32[3];
3417 uNewSs = (uint16_t)uFrame.pu32[4];
3418 }
3419 else
3420 {
3421 Assert(enmEffOpSize == IEMMODE_16BIT);
3422 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
3423 if (rcStrict != VINF_SUCCESS)
3424 return rcStrict;
3425 uNewRip = uFrame.pu16[0];
3426 uNewCs = uFrame.pu16[1];
3427 uNewFlags = uFrame.pu16[2];
3428 uNewRsp = uFrame.pu16[3];
3429 uNewSs = uFrame.pu16[4];
3430 }
3431 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3432 if (rcStrict != VINF_SUCCESS)
3433 return rcStrict;
3434 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3435
3436 /*
3437 * Check stuff.
3438 */
3439 /* Read the CS descriptor. */
3440 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3441 {
3442 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3443 return iemRaiseGeneralProtectionFault0(pIemCpu);
3444 }
3445
3446 IEMSELDESC DescCS;
3447 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3448 if (rcStrict != VINF_SUCCESS)
3449 {
3450 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3451 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3452 return rcStrict;
3453 }
3454
3455 /* Must be a code descriptor. */
3456 if ( !DescCS.Legacy.Gen.u1DescType
3457 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3458 {
3459        Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment DescType=%u Type=%#x -> #GP\n",
3460 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3462 }
3463
3464 /* Privilege checks. */
3465 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3466 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3467 {
3468 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3469 {
3470 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3472 }
3473 }
3474 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3475 {
3476 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3477 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3478 }
3479 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3480 {
3481 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pIemCpu->uCpl));
3482 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3483 }
3484
3485 /* Present? */
3486 if (!DescCS.Legacy.Gen.u1Present)
3487 {
3488 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3489 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3490 }
3491
3492 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3493
3494 /* Read the SS descriptor. */
3495 IEMSELDESC DescSS;
3496 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3497 {
3498 if ( !DescCS.Legacy.Gen.u1Long
3499 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3500 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3501 {
3502 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3503 return iemRaiseGeneralProtectionFault0(pIemCpu);
3504 }
3505 DescSS.Legacy.u = 0;
3506 }
3507 else
3508 {
3509 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3510 if (rcStrict != VINF_SUCCESS)
3511 {
3512 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3513 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3514 return rcStrict;
3515 }
3516 }
3517
3518 /* Privilege checks. */
3519 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3520 {
3521 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3522 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3523 }
3524
3525 uint32_t cbLimitSs;
3526 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3527 cbLimitSs = UINT32_MAX;
3528 else
3529 {
3530 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3531 {
3532 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3533 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3534 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3535 }
3536
3537 /* Must be a writeable data segment descriptor. */
3538 if (!DescSS.Legacy.Gen.u1DescType)
3539 {
3540 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3541 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3542 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3543 }
3544 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3545 {
3546 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3547 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3548 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3549 }
3550
3551 /* Present? */
3552 if (!DescSS.Legacy.Gen.u1Present)
3553 {
3554 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3555 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
3556 }
3557 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3558 }
3559
3560 /* Check EIP. */
3561 if (DescCS.Legacy.Gen.u1Long)
3562 {
3563 if (!IEM_IS_CANONICAL(uNewRip))
3564 {
3565 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3566 uNewCs, uNewRip, uNewSs, uNewRsp));
3567 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3568 }
3569 }
3570 else
3571 {
3572 if (uNewRip > cbLimitCS)
3573 {
3574 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3575 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3576 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3577 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3578 }
3579 }
3580
3581 /*
3582 * Commit the changes, marking CS and SS accessed first since
3583 * that may fail.
3584 */
3585 /** @todo where exactly are these actually marked accessed by a real CPU? */
3586 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3587 {
3588 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3589 if (rcStrict != VINF_SUCCESS)
3590 return rcStrict;
3591 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3592 }
3593 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3594 {
3595 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
3596 if (rcStrict != VINF_SUCCESS)
3597 return rcStrict;
3598 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3599 }
3600
3601 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3602 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3603 if (enmEffOpSize != IEMMODE_16BIT)
3604 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3605 if (pIemCpu->uCpl == 0)
3606 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3607 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3608 fEFlagsMask |= X86_EFL_IF;
3609 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3610 fEFlagsNew &= ~fEFlagsMask;
3611 fEFlagsNew |= uNewFlags & fEFlagsMask;
3612#ifdef DBGFTRACE_ENABLED
3613 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3614 pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3615#endif
3616
3617 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3618 pCtx->rip = uNewRip;
3619 pCtx->cs.Sel = uNewCs;
3620 pCtx->cs.ValidSel = uNewCs;
3621 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3622 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3623 pCtx->cs.u32Limit = cbLimitCS;
3624 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3625 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3626 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3627 pCtx->rsp = uNewRsp;
3628 else
3629 pCtx->sp = (uint16_t)uNewRsp;
3630 pCtx->ss.Sel = uNewSs;
3631 pCtx->ss.ValidSel = uNewSs;
3632 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3633 {
3634 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3635 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3636 pCtx->ss.u32Limit = UINT32_MAX;
3637 pCtx->ss.u64Base = 0;
3638 Log2(("iretq new SS: NULL\n"));
3639 }
3640 else
3641 {
3642 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3643 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3644 pCtx->ss.u32Limit = cbLimitSs;
3645 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3646 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3647 }
3648
3649 if (pIemCpu->uCpl != uNewCpl)
3650 {
3651 pIemCpu->uCpl = uNewCpl;
3652 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
3653 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
3654 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
3655 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
3656 }
3657
3658 return VINF_SUCCESS;
3659}
3660
3661
3662/**
3663 * Implements iret.
3664 *
3665 * @param enmEffOpSize The effective operand size.
3666 */
3667IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3668{
3669 /*
3670 * First, clear NMI blocking, if any, before causing any exceptions.
3671 */
3672 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3673 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3674
3675 /*
3676 * Call a mode specific worker.
3677 */
3678 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3679 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3680 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3681 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3682 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3683}
3684
3685
3686/**
3687 * Implements SYSCALL (AMD and Intel64).
3690 */
3691IEM_CIMPL_DEF_0(iemCImpl_syscall)
3692{
3693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3694
3695 /*
3696 * Check preconditions.
3697 *
3698     * Note that the CPUs described in the documentation may load slightly
3699     * different values into CS and SS than we allow here. This has yet to be
3700     * checked on real hardware.
3701 */
3702 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3703 {
3704 Log(("syscall: Not enabled in EFER -> #UD\n"));
3705 return iemRaiseUndefinedOpcode(pIemCpu);
3706 }
3707 if (!(pCtx->cr0 & X86_CR0_PE))
3708 {
3709 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3710 return iemRaiseGeneralProtectionFault0(pIemCpu);
3711 }
3712 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3713 {
3714 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3715 return iemRaiseUndefinedOpcode(pIemCpu);
3716 }
3717
3718 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3719 /** @todo what about LDT selectors? Shouldn't matter, really. */
3720 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3721 uint16_t uNewSs = uNewCs + 8;
3722 if (uNewCs == 0 || uNewSs == 0)
3723 {
3724 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3725 return iemRaiseGeneralProtectionFault0(pIemCpu);
3726 }
3727
3728 /* Long mode and legacy mode differs. */
3729 if (CPUMIsGuestInLongModeEx(pCtx))
3730 {
3731        uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3732
3733 /* This test isn't in the docs, but I'm not trusting the guys writing
3734 the MSRs to have validated the values as canonical like they should. */
3735 if (!IEM_IS_CANONICAL(uNewRip))
3736 {
3737            Log(("syscall: New RIP %#RX64 is not canonical -> #UD\n", uNewRip));
3738 return iemRaiseUndefinedOpcode(pIemCpu);
3739 }
3740
3741 /*
3742 * Commit it.
3743 */
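        /* 64-bit SYSCALL convention: the return RIP is saved in RCX and the current RFLAGS
           (with RF cleared) in R11, then RFLAGS is masked by IA32_FMASK (msrSFMASK). */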
3744        Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3745 pCtx->rcx = pCtx->rip + cbInstr;
3746 pCtx->rip = uNewRip;
3747
3748 pCtx->rflags.u &= ~X86_EFL_RF;
3749 pCtx->r11 = pCtx->rflags.u;
3750 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3751 pCtx->rflags.u |= X86_EFL_1;
3752
3753 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3754 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3755 }
3756 else
3757 {
3758 /*
3759 * Commit it.
3760 */
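        /* Legacy mode SYSCALL: the target EIP comes from the low 32 bits of STAR; VM, IF
           and RF are cleared and no flag mask MSR is applied. */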
3761 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3762             pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3763 pCtx->rcx = pCtx->eip + cbInstr;
3764 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3765 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3766
3767 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3768 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3769 }
3770 pCtx->cs.Sel = uNewCs;
3771 pCtx->cs.ValidSel = uNewCs;
3772 pCtx->cs.u64Base = 0;
3773 pCtx->cs.u32Limit = UINT32_MAX;
3774 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3775
3776 pCtx->ss.Sel = uNewSs;
3777 pCtx->ss.ValidSel = uNewSs;
3778 pCtx->ss.u64Base = 0;
3779 pCtx->ss.u32Limit = UINT32_MAX;
3780 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3781
3782 return VINF_SUCCESS;
3783}
3784
3785
3786/**
3787 * Implements SYSRET (AMD and Intel64).
3788 */
3789IEM_CIMPL_DEF_0(iemCImpl_sysret)
3790
3791{
3792 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3793
3794 /*
3795 * Check preconditions.
3796 *
3797     * Note that the CPUs described in the documentation may load slightly
3798     * different values into CS and SS than we allow here. This has yet to be
3799     * checked on real hardware.
3800 */
3801 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3802 {
3803 Log(("sysret: Not enabled in EFER -> #UD\n"));
3804 return iemRaiseUndefinedOpcode(pIemCpu);
3805 }
3806 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3807 {
3808 Log(("sysret: Only available in long mode on intel -> #UD\n"));
3809 return iemRaiseUndefinedOpcode(pIemCpu);
3810 }
3811 if (!(pCtx->cr0 & X86_CR0_PE))
3812 {
3813 Log(("sysret: Protected mode is required -> #GP(0)\n"));
3814 return iemRaiseGeneralProtectionFault0(pIemCpu);
3815 }
3816 if (pIemCpu->uCpl != 0)
3817 {
3818 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl));
3819 return iemRaiseGeneralProtectionFault0(pIemCpu);
3820 }
3821
3822 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
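    /* SYSRET selects CS from STAR[63:48] and SS as that value + 8; the CS selector is
       bumped by another 16 when returning to 64-bit code. */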
3823 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3824 uint16_t uNewSs = uNewCs + 8;
3825 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3826 uNewCs += 16;
3827 if (uNewCs == 0 || uNewSs == 0)
3828 {
3829 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3830 return iemRaiseGeneralProtectionFault0(pIemCpu);
3831 }
3832
3833 /*
3834 * Commit it.
3835 */
3836 if (CPUMIsGuestInLongModeEx(pCtx))
3837 {
3838 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3839 {
3840 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
3841                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
3842            /* Note! We disregard the Intel manual regarding the RCX canonical
3843               check; ask intel+xen why AMD doesn't do it. */
3844 pCtx->rip = pCtx->rcx;
3845 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3846 | (3 << X86DESCATTR_DPL_SHIFT);
3847 }
3848 else
3849 {
3850 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
3851                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
3852 pCtx->rip = pCtx->ecx;
3853 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3854 | (3 << X86DESCATTR_DPL_SHIFT);
3855 }
3856 /** @todo testcase: See what kind of flags we can make SYSRET restore and
3857 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
3858 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
3859 pCtx->rflags.u |= X86_EFL_1;
3860 }
3861 else
3862 {
3863        Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
3864 pCtx->rip = pCtx->rcx;
3865 pCtx->rflags.u |= X86_EFL_IF;
3866 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3867 | (3 << X86DESCATTR_DPL_SHIFT);
3868 }
3869 pCtx->cs.Sel = uNewCs | 3;
3870 pCtx->cs.ValidSel = uNewCs | 3;
3871 pCtx->cs.u64Base = 0;
3872 pCtx->cs.u32Limit = UINT32_MAX;
3873 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3874
3875 pCtx->ss.Sel = uNewSs | 3;
3876 pCtx->ss.ValidSel = uNewSs | 3;
3877 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3878    /* The SS hidden bits remain unchanged says AMD. To that I say "Yeah, right!". */
3879 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
3880 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
3881 * on sysret. */
3882
3883 return VINF_SUCCESS;
3884}
3885
3886
3887/**
3888 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
3889 *
3890 * @param iSegReg The segment register number (valid).
3891 * @param uSel The new selector value.
3892 */
3893IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
3894{
3895 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3896 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
3897 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
3898
3899 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
3900
3901 /*
3902 * Real mode and V8086 mode are easy.
3903 */
3904 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3905 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3906 {
3907 *pSel = uSel;
3908 pHid->u64Base = (uint32_t)uSel << 4;
3909 pHid->ValidSel = uSel;
3910 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3911#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
3912 /** @todo Does the CPU actually load limits and attributes in the
3913 * real/V8086 mode segment load case? It doesn't for CS in far
3914 * jumps... Affects unreal mode. */
3915 pHid->u32Limit = 0xffff;
3916 pHid->Attr.u = 0;
3917 pHid->Attr.n.u1Present = 1;
3918 pHid->Attr.n.u1DescType = 1;
3919 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
3920 ? X86_SEL_TYPE_RW
3921 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
3922#endif
3923 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3924 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3925 return VINF_SUCCESS;
3926 }
3927
3928 /*
3929 * Protected mode.
3930 *
3931 * Check if it's a null segment selector value first, that's OK for DS, ES,
3932 * FS and GS. If not null, then we have to load and parse the descriptor.
3933 */
3934 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3935 {
3936 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
3937 if (iSegReg == X86_SREG_SS)
3938 {
3939 /* In 64-bit kernel mode, the stack can be 0 because of the way
3940               interrupts are dispatched. AMD seems to have a slightly more
3941 relaxed relationship to SS.RPL than intel does. */
3942 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
3943 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
3944 || pIemCpu->uCpl > 2
3945 || ( uSel != pIemCpu->uCpl
3946 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
3947 {
3948 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
3949 return iemRaiseGeneralProtectionFault0(pIemCpu);
3950 }
3951 }
3952
3953 *pSel = uSel; /* Not RPL, remember :-) */
3954 iemHlpLoadNullDataSelectorProt(pIemCpu, pHid, uSel);
3955 if (iSegReg == X86_SREG_SS)
3956 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
3957
3958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
3959 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3960
3961 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3962 return VINF_SUCCESS;
3963 }
3964
3965 /* Fetch the descriptor. */
3966 IEMSELDESC Desc;
3967 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
3968 if (rcStrict != VINF_SUCCESS)
3969 return rcStrict;
3970
3971 /* Check GPs first. */
3972 if (!Desc.Legacy.Gen.u1DescType)
3973 {
3974 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
3975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3976 }
3977 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
3978 {
3979 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3980 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3981 {
3982 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
3983 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3984 }
3985 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
3986 {
3987 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
3988 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3989 }
3990 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3991 {
3992 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3993 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3994 }
3995 }
3996 else
3997 {
3998 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3999 {
4000 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4001 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4002 }
4003 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4004 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4005 {
4006#if 0 /* this is what intel says. */
4007 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4008 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4009 {
4010 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4011 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4012 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4013 }
4014#else /* this is what makes more sense. */
4015 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4016 {
4017 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4018 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4019 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4020 }
4021 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4022 {
4023 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4024 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4025 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4026 }
4027#endif
4028 }
4029 }
4030
4031 /* Is it there? */
4032 if (!Desc.Legacy.Gen.u1Present)
4033 {
4034 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4035 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4036 }
4037
4038 /* The base and limit. */
4039 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4040 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4041
4042 /*
4043 * Ok, everything checked out fine. Now set the accessed bit before
4044 * committing the result into the registers.
4045 */
4046 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4047 {
4048 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4049 if (rcStrict != VINF_SUCCESS)
4050 return rcStrict;
4051 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4052 }
4053
4054 /* commit */
4055 *pSel = uSel;
4056 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4057 pHid->u32Limit = cbLimit;
4058 pHid->u64Base = u64Base;
4059 pHid->ValidSel = uSel;
4060 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4061
4062 /** @todo check if the hidden bits are loaded correctly for 64-bit
4063 * mode. */
4064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
4065
4066 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
4067 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4068 return VINF_SUCCESS;
4069}
4070
4071
4072/**
4073 * Implements 'mov SReg, r/m'.
4074 *
4075 * @param iSegReg The segment register number (valid).
4076 * @param uSel The new selector value.
4077 */
4078IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4079{
4080 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4081 if (rcStrict == VINF_SUCCESS)
4082 {
4083 if (iSegReg == X86_SREG_SS)
4084 {
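            /* Loading SS inhibits interrupts (and single step traps) until the end of the
               next instruction, so the stack pointer update can follow atomically. */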
4085 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4086 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4087 }
4088 }
4089 return rcStrict;
4090}
4091
4092
4093/**
4094 * Implements 'pop SReg'.
4095 *
4096 * @param iSegReg The segment register number (valid).
4097 * @param enmEffOpSize The effective operand size (valid).
4098 */
4099IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4100{
4101 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4102 VBOXSTRICTRC rcStrict;
4103
4104 /*
4105 * Read the selector off the stack and join paths with mov ss, reg.
4106 */
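    /* Pop into a temporary RSP copy so a faulting segment load leaves the guest stack
       pointer unchanged; it is only committed once the load has succeeded. */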
4107 RTUINT64U TmpRsp;
4108 TmpRsp.u = pCtx->rsp;
4109 switch (enmEffOpSize)
4110 {
4111 case IEMMODE_16BIT:
4112 {
4113 uint16_t uSel;
4114 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4115 if (rcStrict == VINF_SUCCESS)
4116 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4117 break;
4118 }
4119
4120 case IEMMODE_32BIT:
4121 {
4122 uint32_t u32Value;
4123 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4124 if (rcStrict == VINF_SUCCESS)
4125 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4126 break;
4127 }
4128
4129 case IEMMODE_64BIT:
4130 {
4131 uint64_t u64Value;
4132 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4133 if (rcStrict == VINF_SUCCESS)
4134 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4135 break;
4136 }
4137 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4138 }
4139
4140 /*
4141 * Commit the stack on success.
4142 */
4143 if (rcStrict == VINF_SUCCESS)
4144 {
4145 pCtx->rsp = TmpRsp.u;
4146 if (iSegReg == X86_SREG_SS)
4147 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4148 }
4149 return rcStrict;
4150}
4151
4152
4153/**
4154 * Implements lgs, lfs, les, lds & lss.
4155 */
4156IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4157 uint16_t, uSel,
4158 uint64_t, offSeg,
4159 uint8_t, iSegReg,
4160 uint8_t, iGReg,
4161 IEMMODE, enmEffOpSize)
4162{
4163 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
4164 VBOXSTRICTRC rcStrict;
4165
4166 /*
4167 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4168 */
4169    /** @todo verify and test that mov, pop and lXs handle the segment
4170     *        register loading in the exact same way. */
4171 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4172 if (rcStrict == VINF_SUCCESS)
4173 {
4174 switch (enmEffOpSize)
4175 {
4176 case IEMMODE_16BIT:
4177 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4178 break;
4179 case IEMMODE_32BIT:
4180 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4181 break;
4182 case IEMMODE_64BIT:
4183 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4184 break;
4185 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4186 }
4187 }
4188
4189 return rcStrict;
4190}
4191
4192
4193/**
4194 * Helper for VERR, VERW, LAR, and LSL that fetches the descriptor for a selector.
4195 *
4196 * @retval VINF_SUCCESS on success.
4197 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4198 * @retval iemMemFetchSysU64 return value.
4199 *
4200 * @param pIemCpu The IEM state of the calling EMT.
4201 * @param uSel The selector value.
4202 * @param fAllowSysDesc Whether system descriptors are OK or not.
4203 * @param pDesc Where to return the descriptor on success.
4204 */
4205static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4206{
4207 pDesc->Long.au64[0] = 0;
4208 pDesc->Long.au64[1] = 0;
4209
4210 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4211 return VINF_IEM_SELECTOR_NOT_OK;
4212
4213 /* Within the table limits? */
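    /* Note: OR'ing the selector with 7 (X86_SEL_RPL_LDT) gives the offset of the last byte
       of the 8-byte descriptor, so the comparison verifies the whole entry is within the limit. */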
4214 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4215 RTGCPTR GCPtrBase;
4216 if (uSel & X86_SEL_LDT)
4217 {
4218 if ( !pCtx->ldtr.Attr.n.u1Present
4219 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4220 return VINF_IEM_SELECTOR_NOT_OK;
4221 GCPtrBase = pCtx->ldtr.u64Base;
4222 }
4223 else
4224 {
4225 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4226 return VINF_IEM_SELECTOR_NOT_OK;
4227 GCPtrBase = pCtx->gdtr.pGdt;
4228 }
4229
4230 /* Fetch the descriptor. */
4231 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4232 if (rcStrict != VINF_SUCCESS)
4233 return rcStrict;
4234 if (!pDesc->Legacy.Gen.u1DescType)
4235 {
4236 if (!fAllowSysDesc)
4237 return VINF_IEM_SELECTOR_NOT_OK;
4238 if (CPUMIsGuestInLongModeEx(pCtx))
4239 {
4240 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4241 if (rcStrict != VINF_SUCCESS)
4242 return rcStrict;
4243 }
4244
4245 }
4246
4247 return VINF_SUCCESS;
4248}
4249
4250
4251/**
4252 * Implements verr (fWrite = false) and verw (fWrite = true).
4253 */
4254IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4255{
4256 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4257
4258    /** @todo figure out whether the accessed bit is set or not. */
4259
4260 bool fAccessible = true;
4261 IEMSELDESC Desc;
4262 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4263 if (rcStrict == VINF_SUCCESS)
4264 {
4265 /* Check the descriptor, order doesn't matter much here. */
4266 if ( !Desc.Legacy.Gen.u1DescType
4267 || !Desc.Legacy.Gen.u1Present)
4268 fAccessible = false;
4269 else
4270 {
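            /* VERW requires a writable data segment, while VERR accepts any data segment
               or a readable code segment. */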
4271 if ( fWrite
4272 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4273 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4274 fAccessible = false;
4275
4276 /** @todo testcase for the conforming behavior. */
4277 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4278 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4279 {
4280 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4281 fAccessible = false;
4282 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4283 fAccessible = false;
4284 }
4285 }
4286
4287 }
4288 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4289 fAccessible = false;
4290 else
4291 return rcStrict;
4292
4293 /* commit */
4294 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
4295
4296 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4297 return VINF_SUCCESS;
4298}
4299
4300
4301/**
4302 * Implements LAR and LSL with 64-bit operand size.
4303 *
4304 * @returns VINF_SUCCESS.
4305 * @param pu64Dst Pointer to the destination register.
4306 * @param uSel The selector to load details for.
4307 * @param pEFlags Pointer to the eflags register.
4308 * @param fIsLar true = LAR, false = LSL.
4309 */
4310IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4311{
4312 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4313
4314    /** @todo figure out whether the accessed bit is set or not. */
4315
4316 bool fDescOk = true;
4317 IEMSELDESC Desc;
4318 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4319 if (rcStrict == VINF_SUCCESS)
4320 {
4321 /*
4322 * Check the descriptor type.
4323 */
4324 if (!Desc.Legacy.Gen.u1DescType)
4325 {
4326 if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx)))
4327 {
4328 if (Desc.Long.Gen.u5Zeros)
4329 fDescOk = false;
4330 else
4331 switch (Desc.Long.Gen.u4Type)
4332 {
4333 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4334 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4335 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4336 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4337 break;
4338 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4339 fDescOk = fIsLar;
4340 break;
4341 default:
4342 fDescOk = false;
4343 break;
4344 }
4345 }
4346 else
4347 {
4348 switch (Desc.Long.Gen.u4Type)
4349 {
4350 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4351 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4352 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4353 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4354 case X86_SEL_TYPE_SYS_LDT:
4355 break;
4356 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4357 case X86_SEL_TYPE_SYS_TASK_GATE:
4358 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4359 fDescOk = fIsLar;
4360 break;
4361 default:
4362 fDescOk = false;
4363 break;
4364 }
4365 }
4366 }
4367 if (fDescOk)
4368 {
4369 /*
4370             * Check the RPL/DPL/CPL interaction.
4371 */
4372 /** @todo testcase for the conforming behavior. */
4373 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4374 || !Desc.Legacy.Gen.u1DescType)
4375 {
4376 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4377 fDescOk = false;
4378 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4379 fDescOk = false;
4380 }
4381 }
4382
4383 if (fDescOk)
4384 {
4385 /*
4386 * All fine, start committing the result.
4387 */
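            /* LAR returns the attribute bytes of the descriptor (dword 1, bits 8 thru 23),
               while LSL returns the granularity-expanded segment limit. */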
4388 if (fIsLar)
4389 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4390 else
4391 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4392 }
4393
4394 }
4395 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4396 fDescOk = false;
4397 else
4398 return rcStrict;
4399
4400 /* commit flags value and advance rip. */
4401 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;
4402 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4403
4404 return VINF_SUCCESS;
4405}
4406
4407
4408/**
4409 * Implements LAR and LSL with 16-bit operand size.
4410 *
4411 * @returns VINF_SUCCESS.
4412 * @param pu16Dst Pointer to the destination register.
4413 * @param uSel The selector to load details for.
4414 * @param pEFlags Pointer to the eflags register.
4415 * @param fIsLar true = LAR, false = LSL.
4416 */
4417IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4418{
4419 uint64_t u64TmpDst = *pu16Dst;
4420 IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar);
4421 *pu16Dst = (uint16_t)u64TmpDst;
4422 return VINF_SUCCESS;
4423}
4424
4425
4426/**
4427 * Implements lgdt.
4428 *
4429 * @param iEffSeg The segment of the new gdtr contents
4430 * @param GCPtrEffSrc The address of the new gdtr contents.
4431 * @param enmEffOpSize The effective operand size.
4432 */
4433IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4434{
4435 if (pIemCpu->uCpl != 0)
4436 return iemRaiseGeneralProtectionFault0(pIemCpu);
4437 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4438
4439 /*
4440 * Fetch the limit and base address.
4441 */
4442 uint16_t cbLimit;
4443 RTGCPTR GCPtrBase;
4444 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4445 if (rcStrict == VINF_SUCCESS)
4446 {
4447 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4448 || X86_IS_CANONICAL(GCPtrBase))
4449 {
4450 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4451 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4452 else
4453 {
4454 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4455 pCtx->gdtr.cbGdt = cbLimit;
4456 pCtx->gdtr.pGdt = GCPtrBase;
4457 }
4458 if (rcStrict == VINF_SUCCESS)
4459 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4460 }
4461 else
4462 {
4463 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4464 return iemRaiseGeneralProtectionFault0(pIemCpu);
4465 }
4466 }
4467 return rcStrict;
4468}
4469
4470
4471/**
4472 * Implements sgdt.
4473 *
4474 * @param iEffSeg The segment where to store the gdtr content.
4475 * @param GCPtrEffDst The address where to store the gdtr content.
4476 */
4477IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4478{
4479 /*
4480 * Join paths with sidt.
4481 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4482 * you really must know.
4483 */
4484 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4485 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4486 if (rcStrict == VINF_SUCCESS)
4487 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4488 return rcStrict;
4489}
4490
4491
4492/**
4493 * Implements lidt.
4494 *
4495 * @param iEffSeg The segment of the new idtr contents
4496 * @param GCPtrEffSrc The address of the new idtr contents.
4497 * @param enmEffOpSize The effective operand size.
4498 */
4499IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4500{
4501 if (pIemCpu->uCpl != 0)
4502 return iemRaiseGeneralProtectionFault0(pIemCpu);
4503 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4504
4505 /*
4506 * Fetch the limit and base address.
4507 */
4508 uint16_t cbLimit;
4509 RTGCPTR GCPtrBase;
4510 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4511 if (rcStrict == VINF_SUCCESS)
4512 {
4513 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
4514 || X86_IS_CANONICAL(GCPtrBase))
4515 {
4516 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4517 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4518 else
4519 {
4520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4521 pCtx->idtr.cbIdt = cbLimit;
4522 pCtx->idtr.pIdt = GCPtrBase;
4523 }
4524 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4525 }
4526 else
4527 {
4528 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4529 return iemRaiseGeneralProtectionFault0(pIemCpu);
4530 }
4531 }
4532 return rcStrict;
4533}
4534
4535
4536/**
4537 * Implements sidt.
4538 *
4539 * @param iEffSeg The segment where to store the idtr content.
4540 * @param GCPtrEffDst The address where to store the idtr content.
4541 */
4542IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4543{
4544 /*
4545 * Join paths with sgdt.
4546 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4547 * you really must know.
4548 */
4549 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4550 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4551 if (rcStrict == VINF_SUCCESS)
4552 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4553 return rcStrict;
4554}
4555
4556
4557/**
4558 * Implements lldt.
4559 *
4560 * @param uNewLdt The new LDT selector value.
4561 */
4562IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4563{
4564 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4565
4566 /*
4567 * Check preconditions.
4568 */
4569 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4570 {
4571 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4572 return iemRaiseUndefinedOpcode(pIemCpu);
4573 }
4574 if (pIemCpu->uCpl != 0)
4575 {
4576 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
4577 return iemRaiseGeneralProtectionFault0(pIemCpu);
4578 }
4579 if (uNewLdt & X86_SEL_LDT)
4580 {
4581 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4582 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
4583 }
4584
4585 /*
4586 * Now, loading a NULL selector is easy.
4587 */
4588 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4589 {
4590 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4591 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4592 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
4593 else
4594 pCtx->ldtr.Sel = uNewLdt;
4595 pCtx->ldtr.ValidSel = uNewLdt;
4596 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4597 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4598 {
4599 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4600            pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4601 }
4602 else if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
4603 {
4604 /* AMD-V seems to leave the base and limit alone. */
4605 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4606 }
4607 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4608 {
4609 /* VT-x (Intel 3960x) seems to be doing the following. */
4610 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4611 pCtx->ldtr.u64Base = 0;
4612 pCtx->ldtr.u32Limit = UINT32_MAX;
4613 }
4614
4615 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4616 return VINF_SUCCESS;
4617 }
4618
4619 /*
4620 * Read the descriptor.
4621 */
4622 IEMSELDESC Desc;
4623 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4624 if (rcStrict != VINF_SUCCESS)
4625 return rcStrict;
4626
4627 /* Check GPs first. */
4628 if (Desc.Legacy.Gen.u1DescType)
4629 {
4630 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4631 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4632 }
4633 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4634 {
4635 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4636 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4637 }
4638 uint64_t u64Base;
4639 if (!IEM_IS_LONG_MODE(pIemCpu))
4640 u64Base = X86DESC_BASE(&Desc.Legacy);
4641 else
4642 {
4643 if (Desc.Long.Gen.u5Zeros)
4644 {
4645 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4646 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4647 }
4648
4649 u64Base = X86DESC64_BASE(&Desc.Long);
4650 if (!IEM_IS_CANONICAL(u64Base))
4651 {
4652 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4653 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4654 }
4655 }
4656
4657 /* NP */
4658 if (!Desc.Legacy.Gen.u1Present)
4659 {
4660 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4661 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
4662 }
4663
4664 /*
4665 * It checks out alright, update the registers.
4666 */
4667/** @todo check if the actual value is loaded or if the RPL is dropped */
4668 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4669 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
4670 else
4671 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4672 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4673 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4674 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4675 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4676 pCtx->ldtr.u64Base = u64Base;
4677
4678 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4679 return VINF_SUCCESS;
4680}
4681
4682
4683/**
4684 * Implements ltr.
4685 *
4686 * @param uNewTr The new TSS selector value.
4687 */
4688IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4689{
4690 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4691
4692 /*
4693 * Check preconditions.
4694 */
4695 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4696 {
4697 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
4698 return iemRaiseUndefinedOpcode(pIemCpu);
4699 }
4700 if (pIemCpu->uCpl != 0)
4701 {
4702 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
4703 return iemRaiseGeneralProtectionFault0(pIemCpu);
4704 }
4705 if (uNewTr & X86_SEL_LDT)
4706 {
4707 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4708 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
4709 }
4710 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4711 {
4712 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4713 return iemRaiseGeneralProtectionFault0(pIemCpu);
4714 }
4715
4716 /*
4717 * Read the descriptor.
4718 */
4719 IEMSELDESC Desc;
4720 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4721 if (rcStrict != VINF_SUCCESS)
4722 return rcStrict;
4723
4724 /* Check GPs first. */
4725 if (Desc.Legacy.Gen.u1DescType)
4726 {
4727 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4728 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4729 }
4730 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4731 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4732 || IEM_IS_LONG_MODE(pIemCpu)) )
4733 {
4734 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4735 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4736 }
4737 uint64_t u64Base;
4738 if (!IEM_IS_LONG_MODE(pIemCpu))
4739 u64Base = X86DESC_BASE(&Desc.Legacy);
4740 else
4741 {
4742 if (Desc.Long.Gen.u5Zeros)
4743 {
4744 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4745 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4746 }
4747
4748 u64Base = X86DESC64_BASE(&Desc.Long);
4749 if (!IEM_IS_CANONICAL(u64Base))
4750 {
4751 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
4752 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4753 }
4754 }
4755
4756 /* NP */
4757 if (!Desc.Legacy.Gen.u1Present)
4758 {
4759 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
4760 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
4761 }
4762
4763 /*
4764 * Set it busy.
4765 * Note! Intel says this should lock down the whole descriptor, but we'll
4766     *       restrict ourselves to 32-bit for now due to lack of inline
4767 * assembly and such.
4768 */
4769 void *pvDesc;
4770 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
4771 if (rcStrict != VINF_SUCCESS)
4772 return rcStrict;
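    /* The busy flag is bit 41 of the descriptor (bit 1 of byte 5); ASMAtomicBitSet needs a
       32-bit aligned address, so compensate for the mapping's misalignment below. */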
4773 switch ((uintptr_t)pvDesc & 3)
4774 {
4775 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
4776 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
4777 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
4778 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
4779 }
4780 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
4781 if (rcStrict != VINF_SUCCESS)
4782 return rcStrict;
4783 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4784
4785 /*
4786 * It checks out alright, update the registers.
4787 */
4788/** @todo check if the actual value is loaded or if the RPL is dropped */
4789 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4790 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
4791 else
4792 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
4793 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
4794 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4795 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4796 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4797 pCtx->tr.u64Base = u64Base;
4798
4799 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4800 return VINF_SUCCESS;
4801}
4802
4803
4804/**
4805 * Implements mov GReg,CRx.
4806 *
4807 * @param iGReg The general register to store the CRx value in.
4808 * @param iCrReg The CRx register to read (valid).
4809 */
4810IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4811{
4812 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4813 if (pIemCpu->uCpl != 0)
4814 return iemRaiseGeneralProtectionFault0(pIemCpu);
4815 Assert(!pCtx->eflags.Bits.u1VM);
4816
4817 /* read it */
4818 uint64_t crX;
4819 switch (iCrReg)
4820 {
4821 case 0:
4822 crX = pCtx->cr0;
4823 if (IEM_GET_TARGET_CPU(pIemCpu) <= IEMTARGETCPU_386)
4824 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
4825 break;
4826 case 2: crX = pCtx->cr2; break;
4827 case 3: crX = pCtx->cr3; break;
4828 case 4: crX = pCtx->cr4; break;
4829 case 8:
4830 {
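            /* CR8 mirrors bits 7:4 of the local APIC TPR, hence the shift. */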
4831 uint8_t uTpr;
4832 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
4833 if (RT_SUCCESS(rc))
4834 crX = uTpr >> 4;
4835 else
4836 crX = 0;
4837 break;
4838 }
4839 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4840 }
4841
4842 /* store it */
4843 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4844 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4845 else
4846 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4847
4848 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4849 return VINF_SUCCESS;
4850}
4851
4852
4853/**
4854 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
4855 *
4856 * @param iCrReg The CRx register to write (valid).
4857 * @param uNewCrX The new value.
4858 */
4859IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
4860{
4861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4862 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4863 VBOXSTRICTRC rcStrict;
4864 int rc;
4865
4866 /*
4867 * Try store it.
4868 * Unfortunately, CPUM only does a tiny bit of the work.
4869 */
4870 switch (iCrReg)
4871 {
4872 case 0:
4873 {
4874 /*
4875 * Perform checks.
4876 */
4877 uint64_t const uOldCrX = pCtx->cr0;
4878 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4879 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4880 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
4881
4882 /* ET is hardcoded on 486 and later. */
4883 if (IEM_GET_TARGET_CPU(pIemCpu) > IEMTARGETCPU_486)
4884 uNewCrX |= X86_CR0_ET;
4885 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
4886 else if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_486)
4887 {
4888 uNewCrX &= fValid;
4889 uNewCrX |= X86_CR0_ET;
4890 }
4891 else
4892 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
4893
4894 /* Check for reserved bits. */
4895 if (uNewCrX & ~(uint64_t)fValid)
4896 {
4897 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
4898 return iemRaiseGeneralProtectionFault0(pIemCpu);
4899 }
4900
4901 /* Check for invalid combinations. */
4902 if ( (uNewCrX & X86_CR0_PG)
4903 && !(uNewCrX & X86_CR0_PE) )
4904 {
4905 Log(("Trying to set CR0.PG without CR0.PE\n"));
4906 return iemRaiseGeneralProtectionFault0(pIemCpu);
4907 }
4908
4909 if ( !(uNewCrX & X86_CR0_CD)
4910 && (uNewCrX & X86_CR0_NW) )
4911 {
4912 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4913 return iemRaiseGeneralProtectionFault0(pIemCpu);
4914 }
4915
4916 /* Long mode consistency checks. */
4917 if ( (uNewCrX & X86_CR0_PG)
4918 && !(uOldCrX & X86_CR0_PG)
4919 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4920 {
4921 if (!(pCtx->cr4 & X86_CR4_PAE))
4922 {
4923                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
4924 return iemRaiseGeneralProtectionFault0(pIemCpu);
4925 }
4926 if (pCtx->cs.Attr.n.u1Long)
4927 {
4928                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
4929 return iemRaiseGeneralProtectionFault0(pIemCpu);
4930 }
4931 }
4932
4933 /** @todo check reserved PDPTR bits as AMD states. */
4934
4935 /*
4936 * Change CR0.
4937 */
4938 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4939 CPUMSetGuestCR0(pVCpu, uNewCrX);
4940 else
4941 pCtx->cr0 = uNewCrX;
4942 Assert(pCtx->cr0 == uNewCrX);
4943
4944 /*
4945 * Change EFER.LMA if entering or leaving long mode.
4946 */
4947 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
4948 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4949 {
4950 uint64_t NewEFER = pCtx->msrEFER;
4951 if (uNewCrX & X86_CR0_PG)
4952 NewEFER |= MSR_K6_EFER_LMA;
4953 else
4954 NewEFER &= ~MSR_K6_EFER_LMA;
4955
4956 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4957 CPUMSetGuestEFER(pVCpu, NewEFER);
4958 else
4959 pCtx->msrEFER = NewEFER;
4960 Assert(pCtx->msrEFER == NewEFER);
4961 }
4962
4963 /*
4964 * Inform PGM.
4965 */
4966 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4967 {
4968 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
4969 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
4970 {
4971 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4972 AssertRCReturn(rc, rc);
4973 /* ignore informational status codes */
4974 }
4975 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4976 }
4977 else
4978 rcStrict = VINF_SUCCESS;
4979
4980#ifdef IN_RC
4981 /* Return to ring-3 for rescheduling if WP or AM changes. */
4982 if ( rcStrict == VINF_SUCCESS
4983 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
4984 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
4985 rcStrict = VINF_EM_RESCHEDULE;
4986#endif
4987 break;
4988 }
4989
4990 /*
4991 * CR2 can be changed without any restrictions.
4992 */
4993 case 2:
4994 pCtx->cr2 = uNewCrX;
4995 rcStrict = VINF_SUCCESS;
4996 break;
4997
4998 /*
4999 * CR3 is relatively simple, although AMD and Intel have different
5000     * accounts of how setting reserved bits is handled. We take Intel's
5001     * word for the lower bits and AMD's for the high bits (63:52). The
5002 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5003 * on this.
5004 */
5005 /** @todo Testcase: Setting reserved bits in CR3, especially before
5006 * enabling paging. */
5007 case 3:
5008 {
5009 /* check / mask the value. */
5010 if (uNewCrX & UINT64_C(0xfff0000000000000))
5011 {
5012 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5013 return iemRaiseGeneralProtectionFault0(pIemCpu);
5014 }
5015
5016 uint64_t fValid;
5017 if ( (pCtx->cr4 & X86_CR4_PAE)
5018 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5019 fValid = UINT64_C(0x000fffffffffffff);
5020 else
5021 fValid = UINT64_C(0xffffffff);
5022 if (uNewCrX & ~fValid)
5023 {
5024 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5025 uNewCrX, uNewCrX & ~fValid));
5026 uNewCrX &= fValid;
5027 }
5028
5029 /** @todo If we're in PAE mode we should check the PDPTRs for
5030 * invalid bits. */
5031
5032 /* Make the change. */
5033 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5034 {
5035 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5036 AssertRCSuccessReturn(rc, rc);
5037 }
5038 else
5039 pCtx->cr3 = uNewCrX;
5040
5041 /* Inform PGM. */
5042 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5043 {
5044 if (pCtx->cr0 & X86_CR0_PG)
5045 {
5046 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5047 AssertRCReturn(rc, rc);
5048 /* ignore informational status codes */
5049 }
5050 }
5051 rcStrict = VINF_SUCCESS;
5052 break;
5053 }
5054
5055 /*
5056 * CR4 is a bit more tedious as there are bits which cannot be cleared
5057 * under some circumstances and such.
5058 */
5059 case 4:
5060 {
5061 uint64_t const uOldCrX = pCtx->cr4;
5062
5063 /** @todo Shouldn't this look at the guest CPUID bits to determine
5064 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5065 * should #GP(0). */
5066 /* reserved bits */
5067 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5068 | X86_CR4_TSD | X86_CR4_DE
5069 | X86_CR4_PSE | X86_CR4_PAE
5070 | X86_CR4_MCE | X86_CR4_PGE
5071 | X86_CR4_PCE | X86_CR4_OSFXSR
5072 | X86_CR4_OSXMMEEXCPT;
5073 //if (xxx)
5074 // fValid |= X86_CR4_VMXE;
5075 if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
5076 fValid |= X86_CR4_OSXSAVE;
5077 if (uNewCrX & ~(uint64_t)fValid)
5078 {
5079 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5080 return iemRaiseGeneralProtectionFault0(pIemCpu);
5081 }
5082
5083 /* long mode checks. */
5084 if ( (uOldCrX & X86_CR4_PAE)
5085 && !(uNewCrX & X86_CR4_PAE)
5086 && CPUMIsGuestInLongModeEx(pCtx) )
5087 {
5088 Log(("Trying to clear CR4.PAE while long mode is active\n"));
5089 return iemRaiseGeneralProtectionFault0(pIemCpu);
5090 }
5091
5092
5093 /*
5094 * Change it.
5095 */
5096 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5097 {
5098 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5099 AssertRCSuccessReturn(rc, rc);
5100 }
5101 else
5102 pCtx->cr4 = uNewCrX;
5103 Assert(pCtx->cr4 == uNewCrX);
5104
5105 /*
5106 * Notify SELM and PGM.
5107 */
5108 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5109 {
5110 /* SELM - VME may change things wrt the TSS shadowing. */
5111 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5112 {
5113 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5114 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5115#ifdef VBOX_WITH_RAW_MODE
5116 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
5117 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5118#endif
5119 }
5120
5121 /* PGM - flushing and mode. */
5122 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5123 {
5124 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5125 AssertRCReturn(rc, rc);
5126 /* ignore informational status codes */
5127 }
5128 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5129 }
5130 else
5131 rcStrict = VINF_SUCCESS;
5132 break;
5133 }
5134
5135 /*
5136 * CR8 maps to the APIC TPR.
5137 */
5138 case 8:
5139 if (uNewCrX & ~(uint64_t)0xf)
5140 {
5141 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5142 return iemRaiseGeneralProtectionFault0(pIemCpu);
5143 }
5144
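            /* CR8 bits 3:0 hold the task priority; they map to APIC TPR bits 7:4, hence the shift by 4 below. */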
5145 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5146 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
5147 rcStrict = VINF_SUCCESS;
5148 break;
5149
5150 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5151 }
5152
5153 /*
5154 * Advance the RIP on success.
5155 */
5156 if (RT_SUCCESS(rcStrict))
5157 {
5158 if (rcStrict != VINF_SUCCESS)
5159 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5160 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5161 }
5162
5163 return rcStrict;
5164}
5165
5166
5167/**
5168 * Implements mov CRx,GReg.
5169 *
5170 * @param iCrReg The CRx register to write (valid).
5171 * @param iGReg The general register to load the CRx value from.
5172 */
5173IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5174{
5175 if (pIemCpu->uCpl != 0)
5176 return iemRaiseGeneralProtectionFault0(pIemCpu);
5177 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5178
5179 /*
5180 * Read the new value from the source register and call common worker.
5181 */
5182 uint64_t uNewCrX;
5183 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5184 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
5185 else
5186 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
5187 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
5188}
5189
5190
5191/**
5192 * Implements 'LMSW r/m16'
5193 *
5194 * @param u16NewMsw The new value.
5195 */
5196IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5197{
5198 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5199
5200 if (pIemCpu->uCpl != 0)
5201 return iemRaiseGeneralProtectionFault0(pIemCpu);
5202 Assert(!pCtx->eflags.Bits.u1VM);
5203
5204 /*
5205 * Compose the new CR0 value and call common worker.
5206 */
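    /* Note: CR0.PE is deliberately not masked out of the old value, so LMSW can set but never clear protected mode. */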
5207 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5208 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5209 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5210}
5211
5212
5213/**
5214 * Implements 'CLTS'.
5215 */
5216IEM_CIMPL_DEF_0(iemCImpl_clts)
5217{
5218 if (pIemCpu->uCpl != 0)
5219 return iemRaiseGeneralProtectionFault0(pIemCpu);
5220
5221 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5222 uint64_t uNewCr0 = pCtx->cr0;
5223 uNewCr0 &= ~X86_CR0_TS;
5224 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5225}
5226
5227
5228/**
5229 * Implements mov GReg,DRx.
5230 *
5231 * @param iGReg The general register to store the DRx value in.
5232 * @param iDrReg The DRx register to read (0-7).
5233 */
5234IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5235{
5236 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5237
5238 /*
5239 * Check preconditions.
5240 */
5241
5242 /* Raise GPs. */
5243 if (pIemCpu->uCpl != 0)
5244 return iemRaiseGeneralProtectionFault0(pIemCpu);
5245 Assert(!pCtx->eflags.Bits.u1VM);
5246
5247 if ( (iDrReg == 4 || iDrReg == 5)
5248 && (pCtx->cr4 & X86_CR4_DE) )
5249 {
5250 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5251 return iemRaiseGeneralProtectionFault0(pIemCpu);
5252 }
5253
5254 /* Raise #DB if general access detect is enabled. */
5255 if (pCtx->dr[7] & X86_DR7_GD)
5256 {
5257 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5258 return iemRaiseDebugException(pIemCpu);
5259 }
5260
5261 /*
5262 * Read the debug register and store it in the specified general register.
5263 */
5264 uint64_t drX;
5265 switch (iDrReg)
5266 {
5267 case 0: drX = pCtx->dr[0]; break;
5268 case 1: drX = pCtx->dr[1]; break;
5269 case 2: drX = pCtx->dr[2]; break;
5270 case 3: drX = pCtx->dr[3]; break;
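        /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1 case was #GP'ed above). */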
5271 case 6:
5272 case 4:
5273 drX = pCtx->dr[6];
5274 drX |= X86_DR6_RA1_MASK;
5275 drX &= ~X86_DR6_RAZ_MASK;
5276 break;
5277 case 7:
5278 case 5:
5279 drX = pCtx->dr[7];
5280 drX |= X86_DR7_RA1_MASK;
5281 drX &= ~X86_DR7_RAZ_MASK;
5282 break;
5283 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5284 }
5285
5286 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5287 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
5288 else
5289 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
5290
5291 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5292 return VINF_SUCCESS;
5293}
5294
5295
5296/**
5297 * Implements mov DRx,GReg.
5298 *
5299 * @param iDrReg The DRx register to write (valid).
5300 * @param iGReg The general register to load the DRx value from.
5301 */
5302IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5303{
5304 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5305
5306 /*
5307 * Check preconditions.
5308 */
5309 if (pIemCpu->uCpl != 0)
5310 return iemRaiseGeneralProtectionFault0(pIemCpu);
5311 Assert(!pCtx->eflags.Bits.u1VM);
5312
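    /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear; remap them below. */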
5313 if (iDrReg == 4 || iDrReg == 5)
5314 {
5315 if (pCtx->cr4 & X86_CR4_DE)
5316 {
5317 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5318 return iemRaiseGeneralProtectionFault0(pIemCpu);
5319 }
5320 iDrReg += 2;
5321 }
5322
5323 /* Raise #DB if general access detect is enabled. */
5324 /** @todo Is the \#DB for DR7.GD raised before the \#GP for reserved high
5325 * bits in DR7/DR6? */
5326 if (pCtx->dr[7] & X86_DR7_GD)
5327 {
5328 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5329 return iemRaiseDebugException(pIemCpu);
5330 }
5331
5332 /*
5333 * Read the new value from the source register.
5334 */
5335 uint64_t uNewDrX;
5336 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5337 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
5338 else
5339 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
5340
5341 /*
5342 * Adjust it.
5343 */
5344 switch (iDrReg)
5345 {
5346 case 0:
5347 case 1:
5348 case 2:
5349 case 3:
5350 /* nothing to adjust */
5351 break;
5352
5353 case 6:
5354 if (uNewDrX & X86_DR6_MBZ_MASK)
5355 {
5356 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5357 return iemRaiseGeneralProtectionFault0(pIemCpu);
5358 }
5359 uNewDrX |= X86_DR6_RA1_MASK;
5360 uNewDrX &= ~X86_DR6_RAZ_MASK;
5361 break;
5362
5363 case 7:
5364 if (uNewDrX & X86_DR7_MBZ_MASK)
5365 {
5366 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5367 return iemRaiseGeneralProtectionFault0(pIemCpu);
5368 }
5369 uNewDrX |= X86_DR7_RA1_MASK;
5370 uNewDrX &= ~X86_DR7_RAZ_MASK;
5371 break;
5372
5373 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5374 }
5375
5376 /*
5377 * Do the actual setting.
5378 */
5379 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5380 {
5381 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
5382 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5383 }
5384 else
5385 pCtx->dr[iDrReg] = uNewDrX;
5386
5387 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5388 return VINF_SUCCESS;
5389}
5390
5391
5392/**
5393 * Implements 'INVLPG m'.
5394 *
5395 * @param GCPtrPage The effective address of the page to invalidate.
5396 * @remarks Updates the RIP.
5397 */
5398IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5399{
5400 /* ring-0 only. */
5401 if (pIemCpu->uCpl != 0)
5402 return iemRaiseGeneralProtectionFault0(pIemCpu);
5403 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5404
5405 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
5406 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5407
5408 if (rc == VINF_SUCCESS)
5409 return VINF_SUCCESS;
5410 if (rc == VINF_PGM_SYNC_CR3)
5411 return iemSetPassUpStatus(pIemCpu, rc);
5412
5413 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5414 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5415 return rc;
5416}
5417
5418
5419/**
5420 * Implements RDTSC.
5421 */
5422IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5423{
5424 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5425
5426 /*
5427 * Check preconditions.
5428 */
5429 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
5430 return iemRaiseUndefinedOpcode(pIemCpu);
5431
5432 if ( (pCtx->cr4 & X86_CR4_TSD)
5433 && pIemCpu->uCpl != 0)
5434 {
5435 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
5436 return iemRaiseGeneralProtectionFault0(pIemCpu);
5437 }
5438
5439 /*
5440 * Do the job.
5441 */
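    /* EDX:EAX receives the 64-bit time stamp counter; writing the 32-bit halves below also zeroes the upper halves of RAX/RDX. */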
5442 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
5443 pCtx->rax = (uint32_t)uTicks;
5444 pCtx->rdx = uTicks >> 32;
5445#ifdef IEM_VERIFICATION_MODE_FULL
5446 pIemCpu->fIgnoreRaxRdx = true;
5447#endif
5448
5449 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5450 return VINF_SUCCESS;
5451}
5452
5453
5454/**
5455 * Implements RDMSR.
5456 */
5457IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5458{
5459 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5460
5461 /*
5462 * Check preconditions.
5463 */
5464 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5465 return iemRaiseUndefinedOpcode(pIemCpu);
5466 if (pIemCpu->uCpl != 0)
5467 return iemRaiseGeneralProtectionFault0(pIemCpu);
5468
5469 /*
5470 * Do the job.
5471 */
5472 RTUINT64U uValue;
5473 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
5474 if (rcStrict == VINF_SUCCESS)
5475 {
5476 pCtx->rax = uValue.s.Lo;
5477 pCtx->rdx = uValue.s.Hi;
5478
5479 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5480 return VINF_SUCCESS;
5481 }
5482
5483#ifndef IN_RING3
5484 /* Deferred to ring-3. */
5485 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5486 {
5487 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5488 return rcStrict;
5489 }
5490#else /* IN_RING3 */
5491 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5492 static uint32_t s_cTimes = 0;
5493 if (s_cTimes++ < 10)
5494 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5495 else
5496#endif
5497 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5498 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5499 return iemRaiseGeneralProtectionFault0(pIemCpu);
5500}
5501
5502
5503/**
5504 * Implements WRMSR.
5505 */
5506IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5507{
5508 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5509
5510 /*
5511 * Check preconditions.
5512 */
5513 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5514 return iemRaiseUndefinedOpcode(pIemCpu);
5515 if (pIemCpu->uCpl != 0)
5516 return iemRaiseGeneralProtectionFault0(pIemCpu);
5517
5518 /*
5519 * Do the job.
5520 */
5521 RTUINT64U uValue;
5522 uValue.s.Lo = pCtx->eax;
5523 uValue.s.Hi = pCtx->edx;
5524
5525 VBOXSTRICTRC rcStrict;
5526 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5527 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5528 else
5529 {
5530#ifdef IN_RING3
5531 CPUMCTX CtxTmp = *pCtx;
5532 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5533 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5534 *pCtx = *pCtx2;
5535 *pCtx2 = CtxTmp;
5536#else
5537 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5538#endif
5539 }
5540 if (rcStrict == VINF_SUCCESS)
5541 {
5542 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5543 return VINF_SUCCESS;
5544 }
5545
5546#ifndef IN_RING3
5547 /* Deferred to ring-3. */
5548 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
5549 {
5550 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
5551 return rcStrict;
5552 }
5553#else /* IN_RING3 */
5554 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5555 static uint32_t s_cTimes = 0;
5556 if (s_cTimes++ < 10)
5557 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5558 else
5559#endif
5560 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5561 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5562 return iemRaiseGeneralProtectionFault0(pIemCpu);
5563}
5564
5565
5566/**
5567 * Implements 'IN eAX, port'.
5568 *
5569 * @param u16Port The source port.
5570 * @param cbReg The register size.
5571 */
5572IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5573{
5574 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5575
5576 /*
5577 * CPL check
5578 */
5579 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5580 if (rcStrict != VINF_SUCCESS)
5581 return rcStrict;
5582
5583 /*
5584 * Perform the I/O.
5585 */
5586 uint32_t u32Value;
5587 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5588 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
5589 else
5590 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
5591 if (IOM_SUCCESS(rcStrict))
5592 {
5593 switch (cbReg)
5594 {
5595 case 1: pCtx->al = (uint8_t)u32Value; break;
5596 case 2: pCtx->ax = (uint16_t)u32Value; break;
5597 case 4: pCtx->rax = u32Value; break;
5598 default: AssertFailedReturn(VERR_IEM_IPE_3);
5599 }
5600 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5601 pIemCpu->cPotentialExits++;
5602 if (rcStrict != VINF_SUCCESS)
5603 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5604 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5605
5606 /*
5607 * Check for I/O breakpoints.
5608 */
5609 uint32_t const uDr7 = pCtx->dr[7];
5610 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5611 && X86_DR7_ANY_RW_IO(uDr7)
5612 && (pCtx->cr4 & X86_CR4_DE))
5613 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5614 {
5615 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5616 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5617 rcStrict = iemRaiseDebugException(pIemCpu);
5618 }
5619 }
5620
5621 return rcStrict;
5622}
5623
5624
5625/**
5626 * Implements 'IN eAX, DX'.
5627 *
5628 * @param cbReg The register size.
5629 */
5630IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5631{
5632 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5633}
5634
5635
5636/**
5637 * Implements 'OUT port, eAX'.
5638 *
5639 * @param u16Port The destination port.
5640 * @param cbReg The register size.
5641 */
5642IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5643{
5644 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5645
5646 /*
5647 * CPL check
5648 */
5649 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5650 if (rcStrict != VINF_SUCCESS)
5651 return rcStrict;
5652
5653 /*
5654 * Perform the I/O.
5655 */
5656 uint32_t u32Value;
5657 switch (cbReg)
5658 {
5659 case 1: u32Value = pCtx->al; break;
5660 case 2: u32Value = pCtx->ax; break;
5661 case 4: u32Value = pCtx->eax; break;
5662 default: AssertFailedReturn(VERR_IEM_IPE_4);
5663 }
5664 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5665 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
5666 else
5667 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
5668 if (IOM_SUCCESS(rcStrict))
5669 {
5670 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5671 pIemCpu->cPotentialExits++;
5672 if (rcStrict != VINF_SUCCESS)
5673 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5674 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5675
5676 /*
5677 * Check for I/O breakpoints.
5678 */
5679 uint32_t const uDr7 = pCtx->dr[7];
5680 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5681 && X86_DR7_ANY_RW_IO(uDr7)
5682 && (pCtx->cr4 & X86_CR4_DE))
5683 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5684 {
5685 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5686 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5687 rcStrict = iemRaiseDebugException(pIemCpu);
5688 }
5689 }
5690 return rcStrict;
5691}
5692
5693
5694/**
5695 * Implements 'OUT DX, eAX'.
5696 *
5697 * @param cbReg The register size.
5698 */
5699IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5700{
5701 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5702}
5703
5704
5705/**
5706 * Implements 'CLI'.
5707 */
5708IEM_CIMPL_DEF_0(iemCImpl_cli)
5709{
5710 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5711 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5712 uint32_t const fEflOld = fEfl;
5713 if (pCtx->cr0 & X86_CR0_PE)
5714 {
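        /* Protected mode: CPL <= IOPL may clear IF; CPL 3 with CR4.PVI clears VIF instead; anything else #GP(0)s. */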
5715 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5716 if (!(fEfl & X86_EFL_VM))
5717 {
5718 if (pIemCpu->uCpl <= uIopl)
5719 fEfl &= ~X86_EFL_IF;
5720 else if ( pIemCpu->uCpl == 3
5721 && (pCtx->cr4 & X86_CR4_PVI) )
5722 fEfl &= ~X86_EFL_VIF;
5723 else
5724 return iemRaiseGeneralProtectionFault0(pIemCpu);
5725 }
5726 /* V8086 */
5727 else if (uIopl == 3)
5728 fEfl &= ~X86_EFL_IF;
5729 else if ( uIopl < 3
5730 && (pCtx->cr4 & X86_CR4_VME) )
5731 fEfl &= ~X86_EFL_VIF;
5732 else
5733 return iemRaiseGeneralProtectionFault0(pIemCpu);
5734 }
5735 /* real mode */
5736 else
5737 fEfl &= ~X86_EFL_IF;
5738
5739 /* Commit. */
5740 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5741 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5742 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
5743 return VINF_SUCCESS;
5744}
5745
5746
5747/**
5748 * Implements 'STI'.
5749 */
5750IEM_CIMPL_DEF_0(iemCImpl_sti)
5751{
5752 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5753 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5754 uint32_t const fEflOld = fEfl;
5755
5756 if (pCtx->cr0 & X86_CR0_PE)
5757 {
5758 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5759 if (!(fEfl & X86_EFL_VM))
5760 {
5761 if (pIemCpu->uCpl <= uIopl)
5762 fEfl |= X86_EFL_IF;
5763 else if ( pIemCpu->uCpl == 3
5764 && (pCtx->cr4 & X86_CR4_PVI)
5765 && !(fEfl & X86_EFL_VIP) )
5766 fEfl |= X86_EFL_VIF;
5767 else
5768 return iemRaiseGeneralProtectionFault0(pIemCpu);
5769 }
5770 /* V8086 */
5771 else if (uIopl == 3)
5772 fEfl |= X86_EFL_IF;
5773 else if ( uIopl < 3
5774 && (pCtx->cr4 & X86_CR4_VME)
5775 && !(fEfl & X86_EFL_VIP) )
5776 fEfl |= X86_EFL_VIF;
5777 else
5778 return iemRaiseGeneralProtectionFault0(pIemCpu);
5779 }
5780 /* real mode */
5781 else
5782 fEfl |= X86_EFL_IF;
5783
5784 /* Commit. */
5785 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5786 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
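    /* A STI that actually enables IF inhibits interrupts until after the next instruction (the STI interrupt shadow). */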
5787 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
5788 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
5789 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
5790 return VINF_SUCCESS;
5791}
5792
5793
5794/**
5795 * Implements 'HLT'.
5796 */
5797IEM_CIMPL_DEF_0(iemCImpl_hlt)
5798{
5799 if (pIemCpu->uCpl != 0)
5800 return iemRaiseGeneralProtectionFault0(pIemCpu);
5801 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5802 return VINF_EM_HALT;
5803}
5804
5805
5806/**
5807 * Implements 'MONITOR'.
5808 */
5809IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
5810{
5811 /*
5812 * Permission checks.
5813 */
5814 if (pIemCpu->uCpl != 0)
5815 {
5816 Log2(("monitor: CPL != 0\n"));
5817 return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
5818 }
5819 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5820 {
5821 Log2(("monitor: Not in CPUID\n"));
5822 return iemRaiseUndefinedOpcode(pIemCpu);
5823 }
5824
5825 /*
5826 * Gather the operands and validate them.
5827 */
5828 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5829 RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
5830 uint32_t uEcx = pCtx->ecx;
5831 uint32_t uEdx = pCtx->edx;
5832/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
5833 * \#GP first. */
5834 if (uEcx != 0)
5835 {
5836 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
5837 return iemRaiseGeneralProtectionFault0(pIemCpu);
5838 }
5839
5840 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
5841 if (rcStrict != VINF_SUCCESS)
5842 return rcStrict;
5843
5844 RTGCPHYS GCPhysMem;
5845 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
5846 if (rcStrict != VINF_SUCCESS)
5847 return rcStrict;
5848
5849 /*
5850 * Call EM to prepare the monitor/wait.
5851 */
5852 rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
5853 Assert(rcStrict == VINF_SUCCESS);
5854
5855 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5856 return rcStrict;
5857}
5858
5859
5860/**
5861 * Implements 'MWAIT'.
5862 */
5863IEM_CIMPL_DEF_0(iemCImpl_mwait)
5864{
5865 /*
5866 * Permission checks.
5867 */
5868 if (pIemCpu->uCpl != 0)
5869 {
5870 Log2(("mwait: CPL != 0\n"));
5871 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
5872 * EFLAGS.VM then.) */
5873 return iemRaiseUndefinedOpcode(pIemCpu);
5874 }
5875 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5876 {
5877 Log2(("mwait: Not in CPUID\n"));
5878 return iemRaiseUndefinedOpcode(pIemCpu);
5879 }
5880
5881 /*
5882 * Gather the operands and validate them.
5883 */
5884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5885 uint32_t uEax = pCtx->eax;
5886 uint32_t uEcx = pCtx->ecx;
5887 if (uEcx != 0)
5888 {
5889 /* Only supported extension is break on IRQ when IF=0. */
5890 if (uEcx > 1)
5891 {
5892 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
5893 return iemRaiseGeneralProtectionFault0(pIemCpu);
5894 }
5895 uint32_t fMWaitFeatures = 0;
5896 uint32_t uIgnore = 0;
5897 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
5898 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5899 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5900 {
5901 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
5902 return iemRaiseGeneralProtectionFault0(pIemCpu);
5903 }
5904 }
5905
5906 /*
5907 * Call EM to prepare the monitor/wait.
5908 */
5909 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);
5910
5911 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5912 return rcStrict;
5913}
5914
5915
5916/**
5917 * Implements 'SWAPGS'.
5918 */
5919IEM_CIMPL_DEF_0(iemCImpl_swapgs)
5920{
5921 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
5922
5923 /*
5924 * Permission checks.
5925 */
5926 if (pIemCpu->uCpl != 0)
5927 {
5928 Log2(("swapgs: CPL != 0\n"));
5929 return iemRaiseUndefinedOpcode(pIemCpu);
5930 }
5931
5932 /*
5933 * Do the job.
5934 */
5935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5936 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
5937 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
5938 pCtx->gs.u64Base = uOtherGsBase;
5939
5940 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5941 return VINF_SUCCESS;
5942}
5943
5944
5945/**
5946 * Implements 'CPUID'.
5947 */
5948IEM_CIMPL_DEF_0(iemCImpl_cpuid)
5949{
5950 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5951
5952 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
5953 pCtx->rax &= UINT32_C(0xffffffff);
5954 pCtx->rbx &= UINT32_C(0xffffffff);
5955 pCtx->rcx &= UINT32_C(0xffffffff);
5956 pCtx->rdx &= UINT32_C(0xffffffff);
5957
5958 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5959 return VINF_SUCCESS;
5960}
5961
5962
5963/**
5964 * Implements 'AAD'.
5965 *
5966 * @param bImm The immediate operand.
5967 */
5968IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
5969{
5970 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5971
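    /* AL := AL + AH*imm, AH := 0.  E.g. AAD (imm=10) with AH=3, AL=7 yields AX=0x0025 (decimal 37). */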
5972 uint16_t const ax = pCtx->ax;
5973 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
5974 pCtx->ax = al;
5975 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5976 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5977 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5978
5979 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5980 return VINF_SUCCESS;
5981}
5982
5983
5984/**
5985 * Implements 'AAM'.
5986 *
5987 * @param bImm The immediate operand. Cannot be 0.
5988 */
5989IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
5990{
5991 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5992 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
5993
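    /* AH := AL / imm, AL := AL % imm.  E.g. AAM (imm=10) with AL=0x4F (79) yields AH=7, AL=9, i.e. AX=0x0709. */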
5994 uint16_t const ax = pCtx->ax;
5995 uint8_t const al = (uint8_t)ax % bImm;
5996 uint8_t const ah = (uint8_t)ax / bImm;
5997 pCtx->ax = (ah << 8) + al;
5998 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5999 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6000 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6001
6002 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6003 return VINF_SUCCESS;
6004}
6005
6006
6007/**
6008 * Implements 'DAA'.
6009 */
6010IEM_CIMPL_DEF_0(iemCImpl_daa)
6011{
6012 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6013
6014 uint8_t const al = pCtx->al;
6015 bool const fCarry = pCtx->eflags.Bits.u1CF;
6016
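    /* E.g. ADD AL,0x28 with AL=0x19 gives AL=0x41 and AF=1; DAA then adds 6 so AL=0x47, the packed BCD sum of 19+28. */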
6017 if ( pCtx->eflags.Bits.u1AF
6018 || (al & 0xf) >= 10)
6019 {
6020 pCtx->al = al + 6;
6021 pCtx->eflags.Bits.u1AF = 1;
6022 }
6023 else
6024 pCtx->eflags.Bits.u1AF = 0;
6025
6026 if (al >= 0x9a || fCarry)
6027 {
6028 pCtx->al += 0x60;
6029 pCtx->eflags.Bits.u1CF = 1;
6030 }
6031 else
6032 pCtx->eflags.Bits.u1CF = 0;
6033
6034 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6035 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6036 return VINF_SUCCESS;
6037}
6038
6039
6040/**
6041 * Implements 'DAS'.
6042 */
6043IEM_CIMPL_DEF_0(iemCImpl_das)
6044{
6045 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6046
6047 uint8_t const uInputAL = pCtx->al;
6048 bool const fCarry = pCtx->eflags.Bits.u1CF;
6049
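    /* E.g. SUB AL,0x28 with AL=0x47 gives AL=0x1F and AF=1; DAS then subtracts 6 so AL=0x19, the packed BCD result of 47-28. */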
6050 if ( pCtx->eflags.Bits.u1AF
6051 || (uInputAL & 0xf) >= 10)
6052 {
6053 pCtx->eflags.Bits.u1AF = 1;
6054 if (uInputAL < 6)
6055 pCtx->eflags.Bits.u1CF = 1;
6056 pCtx->al = uInputAL - 6;
6057 }
6058 else
6059 {
6060 pCtx->eflags.Bits.u1AF = 0;
6061 pCtx->eflags.Bits.u1CF = 0;
6062 }
6063
6064 if (uInputAL >= 0x9a || fCarry)
6065 {
6066 pCtx->al -= 0x60;
6067 pCtx->eflags.Bits.u1CF = 1;
6068 }
6069
6070 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6071 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6072 return VINF_SUCCESS;
6073}
6074
6075
6076
6077
6078/*
6079 * Instantiate the various string operation combinations.
6080 */
6081#define OP_SIZE 8
6082#define ADDR_SIZE 16
6083#include "IEMAllCImplStrInstr.cpp.h"
6084#define OP_SIZE 8
6085#define ADDR_SIZE 32
6086#include "IEMAllCImplStrInstr.cpp.h"
6087#define OP_SIZE 8
6088#define ADDR_SIZE 64
6089#include "IEMAllCImplStrInstr.cpp.h"
6090
6091#define OP_SIZE 16
6092#define ADDR_SIZE 16
6093#include "IEMAllCImplStrInstr.cpp.h"
6094#define OP_SIZE 16
6095#define ADDR_SIZE 32
6096#include "IEMAllCImplStrInstr.cpp.h"
6097#define OP_SIZE 16
6098#define ADDR_SIZE 64
6099#include "IEMAllCImplStrInstr.cpp.h"
6100
6101#define OP_SIZE 32
6102#define ADDR_SIZE 16
6103#include "IEMAllCImplStrInstr.cpp.h"
6104#define OP_SIZE 32
6105#define ADDR_SIZE 32
6106#include "IEMAllCImplStrInstr.cpp.h"
6107#define OP_SIZE 32
6108#define ADDR_SIZE 64
6109#include "IEMAllCImplStrInstr.cpp.h"
6110
6111#define OP_SIZE 64
6112#define ADDR_SIZE 32
6113#include "IEMAllCImplStrInstr.cpp.h"
6114#define OP_SIZE 64
6115#define ADDR_SIZE 64
6116#include "IEMAllCImplStrInstr.cpp.h"
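/* Each inclusion above presumably expands the string instruction template (MOVS/CMPS/STOS/LODS/SCAS/INS/OUTS workers)
   for one OP_SIZE/ADDR_SIZE pair, with the template undefining both macros at the end so the next pair can be defined. */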
6117
6118
6119/**
6120 * Implements 'XGETBV'.
6121 */
6122IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
6123{
6124 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6125 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6126 {
6127 uint32_t uEcx = pCtx->ecx;
6128 switch (uEcx)
6129 {
6130 case 0:
6131 break;
6132
6133 case 1: /** @todo Implement XCR1 support. */
6134 default:
6135 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
6136 return iemRaiseGeneralProtectionFault0(pIemCpu);
6137
6138 }
6139 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
6140 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
6141
6142 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6143 return VINF_SUCCESS;
6144 }
6145 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
6146 return iemRaiseUndefinedOpcode(pIemCpu);
6147}
6148
6149
6150/**
6151 * Implements 'XSETBV'.
6152 */
6153IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
6154{
6155 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6156 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6157 {
6158 if (pIemCpu->uCpl == 0)
6159 {
6160 uint32_t uEcx = pCtx->ecx;
6161 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
6162 switch (uEcx)
6163 {
6164 case 0:
6165 {
6166 int rc = CPUMSetGuestXcr0(IEMCPU_TO_VMCPU(pIemCpu), uNewValue);
6167 if (rc == VINF_SUCCESS)
6168 break;
6169 Assert(rc == VERR_CPUM_RAISE_GP_0);
6170 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6171 return iemRaiseGeneralProtectionFault0(pIemCpu);
6172 }
6173
6174 case 1: /** @todo Implement XCR1 support. */
6175 default:
6176 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6177 return iemRaiseGeneralProtectionFault0(pIemCpu);
6178
6179 }
6180
6181 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6182 return VINF_SUCCESS;
6183 }
6184
6185 Log(("xsetbv cpl=%u -> GP(0)\n", pIemCpu->uCpl));
6186 return iemRaiseGeneralProtectionFault0(pIemCpu);
6187 }
6188 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
6189 return iemRaiseUndefinedOpcode(pIemCpu);
6190}
6191
6192
6193
6194/**
6195 * Implements 'FINIT' and 'FNINIT'.
6196 *
6197 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
6198 * not.
6199 */
6200IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
6201{
6202 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6203
6204 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6205 return iemRaiseDeviceNotAvailable(pIemCpu);
6206
6207 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
6208 if (fCheckXcpts && TODO )
6209 return iemRaiseMathFault(pIemCpu);
6210 */
6211
6212 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
6213 pXState->x87.FCW = 0x37f;
6214 pXState->x87.FSW = 0;
6215 pXState->x87.FTW = 0x00; /* 0 - empty. */
6216 pXState->x87.FPUDP = 0;
6217 pXState->x87.DS = 0; //??
6218 pXState->x87.Rsrvd2= 0;
6219 pXState->x87.FPUIP = 0;
6220 pXState->x87.CS = 0; //??
6221 pXState->x87.Rsrvd1= 0;
6222 pXState->x87.FOP = 0;
6223
6224 iemHlpUsedFpu(pIemCpu);
6225 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6226 return VINF_SUCCESS;
6227}
6228
6229
6230/**
6231 * Implements 'FXSAVE'.
6232 *
6233 * @param iEffSeg The effective segment.
6234 * @param GCPtrEff The address of the image.
6235 * @param enmEffOpSize The operand size (only REX.W really matters).
6236 */
6237IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6238{
6239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6240
6241 /*
6242 * Raise exceptions.
6243 */
6244 if (pCtx->cr0 & X86_CR0_EM)
6245 return iemRaiseUndefinedOpcode(pIemCpu);
6246 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6247 return iemRaiseDeviceNotAvailable(pIemCpu);
6248 if (GCPtrEff & 15)
6249 {
6250 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6251 * all/any misalignment sizes; intel says it's an implementation detail. */
6252 if ( (pCtx->cr0 & X86_CR0_AM)
6253 && pCtx->eflags.Bits.u1AC
6254 && pIemCpu->uCpl == 3)
6255 return iemRaiseAlignmentCheckException(pIemCpu);
6256 return iemRaiseGeneralProtectionFault0(pIemCpu);
6257 }
6258
6259 /*
6260 * Access the memory.
6261 */
6262 void *pvMem512;
6263 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6264 if (rcStrict != VINF_SUCCESS)
6265 return rcStrict;
6266 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
6267 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
6268
6269 /*
6270 * Store the registers.
6271 */
6272 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6273 * specific whether MXCSR and XMM0-XMM7 are saved. */
6274
6275 /* common for all formats */
6276 pDst->FCW = pSrc->FCW;
6277 pDst->FSW = pSrc->FSW;
6278 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6279 pDst->FOP = pSrc->FOP;
6280 pDst->MXCSR = pSrc->MXCSR;
6281 pDst->MXCSR_MASK = pSrc->MXCSR_MASK;
6282 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
6283 {
6284 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
6285 * them for now... */
6286 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6287 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6288 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6289 pDst->aRegs[i].au32[3] = 0;
6290 }
6291
6292 /* FPU IP, CS, DP and DS. */
6293 pDst->FPUIP = pSrc->FPUIP;
6294 pDst->CS = pSrc->CS;
6295 pDst->FPUDP = pSrc->FPUDP;
6296 pDst->DS = pSrc->DS;
6297 if (enmEffOpSize == IEMMODE_64BIT)
6298 {
6299 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
6300 pDst->Rsrvd1 = pSrc->Rsrvd1;
6301 pDst->Rsrvd2 = pSrc->Rsrvd2;
6302 pDst->au32RsrvdForSoftware[0] = 0;
6303 }
6304 else
6305 {
6306 pDst->Rsrvd1 = 0;
6307 pDst->Rsrvd2 = 0;
6308 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
6309 }
6310
6311 /* XMM registers. */
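    /* With EFER.FFXSR set, 64-bit ring-0 code gets the fast variant that skips the XMM registers; everyone else gets them copied. */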
6312 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6313 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6314 || pIemCpu->uCpl != 0)
6315 {
6316 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6317 for (uint32_t i = 0; i < cXmmRegs; i++)
6318 pDst->aXMM[i] = pSrc->aXMM[i];
6319 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
6320 * right? */
6321 }
6322
6323 /*
6324 * Commit the memory.
6325 */
6326 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6327 if (rcStrict != VINF_SUCCESS)
6328 return rcStrict;
6329
6330 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6331 return VINF_SUCCESS;
6332}
6333
6334
6335/**
6336 * Implements 'FXRSTOR'.
6337 *
6338 * @param GCPtrEff The address of the image.
6339 * @param enmEffOpSize The operand size (only REX.W really matters).
6340 */
6341IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6342{
6343 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6344
6345 /*
6346 * Raise exceptions.
6347 */
6348 if (pCtx->cr0 & X86_CR0_EM)
6349 return iemRaiseUndefinedOpcode(pIemCpu);
6350 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6351 return iemRaiseDeviceNotAvailable(pIemCpu);
6352 if (GCPtrEff & 15)
6353 {
6354 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6355 * all/any misalignment sizes; intel says it's an implementation detail. */
6356 if ( (pCtx->cr0 & X86_CR0_AM)
6357 && pCtx->eflags.Bits.u1AC
6358 && pIemCpu->uCpl == 3)
6359 return iemRaiseAlignmentCheckException(pIemCpu);
6360 return iemRaiseGeneralProtectionFault0(pIemCpu);
6361 }
6362
6363 /*
6364 * Access the memory.
6365 */
6366 void *pvMem512;
6367 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
6368 if (rcStrict != VINF_SUCCESS)
6369 return rcStrict;
6370 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
6371 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
6372
6373 /*
6374 * Check the state for stuff which will #GP(0).
6375 */
6376 uint32_t const fMXCSR = pSrc->MXCSR;
6377 uint32_t const fMXCSR_MASK = pDst->MXCSR_MASK ? pDst->MXCSR_MASK : UINT32_C(0xffbf);
6378 if (fMXCSR & ~fMXCSR_MASK)
6379 {
6380 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
6381 return iemRaiseGeneralProtectionFault0(pIemCpu);
6382 }
6383
6384 /*
6385 * Load the registers.
6386 */
6387 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6388 * specific whether MXCSR and XMM0-XMM7 are restored. */
6389
6390 /* common for all formats */
6391 pDst->FCW = pSrc->FCW;
6392 pDst->FSW = pSrc->FSW;
6393 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6394 pDst->FOP = pSrc->FOP;
6395 pDst->MXCSR = fMXCSR;
6396 /* (MXCSR_MASK is read-only) */
6397 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
6398 {
6399 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6400 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6401 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6402 pDst->aRegs[i].au32[3] = 0;
6403 }
6404
6405 /* FPU IP, CS, DP and DS. */
6406 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6407 {
6408 pDst->FPUIP = pSrc->FPUIP;
6409 pDst->CS = pSrc->CS;
6410 pDst->Rsrvd1 = pSrc->Rsrvd1;
6411 pDst->FPUDP = pSrc->FPUDP;
6412 pDst->DS = pSrc->DS;
6413 pDst->Rsrvd2 = pSrc->Rsrvd2;
6414 }
6415 else
6416 {
6417 pDst->FPUIP = pSrc->FPUIP;
6418 pDst->CS = pSrc->CS;
6419 pDst->Rsrvd1 = 0;
6420 pDst->FPUDP = pSrc->FPUDP;
6421 pDst->DS = pSrc->DS;
6422 pDst->Rsrvd2 = 0;
6423 }
6424
6425 /* XMM registers. */
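    /* Same fast-FXRSTOR rule as for FXSAVE: EFER.FFXSR in 64-bit ring-0 skips the XMM registers. */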
6426 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6427 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6428 || pIemCpu->uCpl != 0)
6429 {
6430 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6431 for (uint32_t i = 0; i < cXmmRegs; i++)
6432 pDst->aXMM[i] = pSrc->aXMM[i];
6433 }
6434
6435 /*
6436 * Commit the memory.
6437 */
6438 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
6439 if (rcStrict != VINF_SUCCESS)
6440 return rcStrict;
6441
6442 iemHlpUsedFpu(pIemCpu);
6443 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6444 return VINF_SUCCESS;
6445}
6446
6447
6448/**
6449 * Common routine for fnstenv and fnsave.
6450 *
6451 * @param uPtr Where to store the state.
6452 * @param pCtx The CPU context.
6453 */
6454static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
6455{
6456 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
6457 if (enmEffOpSize == IEMMODE_16BIT)
6458 {
6459 uPtr.pu16[0] = pSrcX87->FCW;
6460 uPtr.pu16[1] = pSrcX87->FSW;
6461 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
6462 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6463 {
6464 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
6465 * protected mode or long mode and we save it in real mode? And vice
6466 * versa? And with 32-bit operand size? I think the CPU stores the
6467 * effective address ((CS << 4) + IP) in the offset register and does
6468 * not do any address calculations here. */
6469 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
6470 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
6471 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
6472 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
6473 }
6474 else
6475 {
6476 uPtr.pu16[3] = pSrcX87->FPUIP;
6477 uPtr.pu16[4] = pSrcX87->CS;
6478 uPtr.pu16[5] = pSrcX87->FPUDP;
6479 uPtr.pu16[6] = pSrcX87->DS;
6480 }
6481 }
6482 else
6483 {
6484 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
6485 uPtr.pu16[0*2] = pSrcX87->FCW;
6486 uPtr.pu16[1*2] = pSrcX87->FSW;
6487 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
6488 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6489 {
6490 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
6491 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
6492 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
6493 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
6494 }
6495 else
6496 {
6497 uPtr.pu32[3] = pSrcX87->FPUIP;
6498 uPtr.pu16[4*2] = pSrcX87->CS;
6499 uPtr.pu16[4*2+1]= pSrcX87->FOP;
6500 uPtr.pu32[5] = pSrcX87->FPUDP;
6501 uPtr.pu16[6*2] = pSrcX87->DS;
6502 }
6503 }
6504}
6505
6506
6507/**
6508 * Common routine for fldenv and frstor.
6509 *
6510 * @param uPtr Where to load the state from.
6511 * @param pCtx The CPU context.
6512 */
6513static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
6514{
6515 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
6516 if (enmEffOpSize == IEMMODE_16BIT)
6517 {
6518 pDstX87->FCW = uPtr.pu16[0];
6519 pDstX87->FSW = uPtr.pu16[1];
6520 pDstX87->FTW = uPtr.pu16[2];
6521 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6522 {
6523 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
6524 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
6525 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
6526 pDstX87->CS = 0;
6527 pDstX87->Rsrvd1= 0;
6528 pDstX87->DS = 0;
6529 pDstX87->Rsrvd2= 0;
6530 }
6531 else
6532 {
6533 pDstX87->FPUIP = uPtr.pu16[3];
6534 pDstX87->CS = uPtr.pu16[4];
6535 pDstX87->Rsrvd1= 0;
6536 pDstX87->FPUDP = uPtr.pu16[5];
6537 pDstX87->DS = uPtr.pu16[6];
6538 pDstX87->Rsrvd2= 0;
6539 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
6540 }
6541 }
6542 else
6543 {
6544 pDstX87->FCW = uPtr.pu16[0*2];
6545 pDstX87->FSW = uPtr.pu16[1*2];
6546 pDstX87->FTW = uPtr.pu16[2*2];
6547 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6548 {
6549 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
6550 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
6551 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
6552 pDstX87->CS = 0;
6553 pDstX87->Rsrvd1= 0;
6554 pDstX87->DS = 0;
6555 pDstX87->Rsrvd2= 0;
6556 }
6557 else
6558 {
6559 pDstX87->FPUIP = uPtr.pu32[3];
6560 pDstX87->CS = uPtr.pu16[4*2];
6561 pDstX87->Rsrvd1= 0;
6562 pDstX87->FOP = uPtr.pu16[4*2+1];
6563 pDstX87->FPUDP = uPtr.pu32[5];
6564 pDstX87->DS = uPtr.pu16[6*2];
6565 pDstX87->Rsrvd2= 0;
6566 }
6567 }
6568
6569 /* Make adjustments. */
6570 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
6571 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
6572 iemFpuRecalcExceptionStatus(pDstX87);
6573 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
6574 * exceptions are pending after loading the saved state? */
6575}
6576
6577
6578/**
6579 * Implements 'FNSTENV'.
6580 *
6581 * @param enmEffOpSize The operand size (only REX.W really matters).
6582 * @param iEffSeg The effective segment register for @a GCPtrEff.
6583 * @param GCPtrEffDst The address of the image.
6584 */
6585IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6586{
6587 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6588 RTPTRUNION uPtr;
6589 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6590 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6591 if (rcStrict != VINF_SUCCESS)
6592 return rcStrict;
6593
6594 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6595
6596 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6597 if (rcStrict != VINF_SUCCESS)
6598 return rcStrict;
6599
6600 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6601 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6602 return VINF_SUCCESS;
6603}
6604
6605
6606/**
6607 * Implements 'FNSAVE'.
6608 *
6609 * @param GCPtrEffDst The address of the image.
6610 * @param enmEffOpSize The operand size.
6611 */
6612IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6613{
6614 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6615 RTPTRUNION uPtr;
6616 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6617 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6618 if (rcStrict != VINF_SUCCESS)
6619 return rcStrict;
6620
6621 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6622 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6623 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6624 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6625 {
6626 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
6627 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
6628 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
6629 }
6630
6631 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6632 if (rcStrict != VINF_SUCCESS)
6633 return rcStrict;
6634
6635 /*
6636 * Re-initialize the FPU context.
6637 */
6638 pFpuCtx->FCW = 0x37f;
6639 pFpuCtx->FSW = 0;
6640 pFpuCtx->FTW = 0x00; /* 0 - empty */
6641 pFpuCtx->FPUDP = 0;
6642 pFpuCtx->DS = 0;
6643 pFpuCtx->Rsrvd2= 0;
6644 pFpuCtx->FPUIP = 0;
6645 pFpuCtx->CS = 0;
6646 pFpuCtx->Rsrvd1= 0;
6647 pFpuCtx->FOP = 0;
6648
6649 iemHlpUsedFpu(pIemCpu);
6650 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6651 return VINF_SUCCESS;
6652}
6653
6654
6655
6656/**
6657 * Implements 'FLDENV'.
6658 *
6659 * @param enmEffOpSize The operand size (only REX.W really matters).
6660 * @param iEffSeg The effective segment register for @a GCPtrEff.
6661 * @param GCPtrEffSrc The address of the image.
6662 */
6663IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6664{
6665 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6666 RTCPTRUNION uPtr;
6667 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6668 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6669 if (rcStrict != VINF_SUCCESS)
6670 return rcStrict;
6671
6672 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6673
6674 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6675 if (rcStrict != VINF_SUCCESS)
6676 return rcStrict;
6677
6678 iemHlpUsedFpu(pIemCpu);
6679 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6680 return VINF_SUCCESS;
6681}
6682
6683
6684/**
6685 * Implements 'FRSTOR'.
6686 *
6687 * @param GCPtrEffSrc The address of the image.
6688 * @param enmEffOpSize The operand size.
6689 */
6690IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6691{
6692 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6693 RTCPTRUNION uPtr;
6694 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6695 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6696 if (rcStrict != VINF_SUCCESS)
6697 return rcStrict;
6698
6699 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6700 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6701 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6702 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6703 {
6704 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
6705 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
6706 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
6707 pFpuCtx->aRegs[i].au32[3] = 0;
6708 }
6709
6710 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6711 if (rcStrict != VINF_SUCCESS)
6712 return rcStrict;
6713
6714 iemHlpUsedFpu(pIemCpu);
6715 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6716 return VINF_SUCCESS;
6717}
6718
6719
6720/**
6721 * Implements 'FLDCW'.
6722 *
6723 * @param u16Fcw The new FCW.
6724 */
6725IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
6726{
6727 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6728
6729 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
6730 /** @todo Testcase: Try to see what happens when trying to set undefined bits
6731 * (other than 6 and 7). Currently ignoring them. */
6732 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
6733 * according to FSW. (This is what is currently implemented.) */
6734 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6735 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
6736 iemFpuRecalcExceptionStatus(pFpuCtx);
6737
6738 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6739 iemHlpUsedFpu(pIemCpu);
6740 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6741 return VINF_SUCCESS;
6742}
6743
6744
6745
6746/**
6747 * Implements the underflow case of fxch.
6748 *
6749 * @param iStReg The other stack register.
6750 */
6751IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
6752{
6753 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6754
6755 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6756 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
6757 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6758 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
6759
6760 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
6761 * registers are read as QNaN and then exchanged. This could be
6762 * wrong... */
6763 if (pFpuCtx->FCW & X86_FCW_IM)
6764 {
6765 if (RT_BIT(iReg1) & pFpuCtx->FTW)
6766 {
6767 if (RT_BIT(iReg2) & pFpuCtx->FTW)
6768 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6769 else
6770 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
6771 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6772 }
6773 else
6774 {
6775 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
6776 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6777 }
6778 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6779 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6780 }
6781 else
6782 {
6783 /* raise underflow exception, don't change anything. */
6784 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
6785 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6786 }
6787
6788 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6789 iemHlpUsedFpu(pIemCpu);
6790 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6791 return VINF_SUCCESS;
6792}
6793
6794
6795/**
6796 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
6797 *
6798 * @param iStReg The other stack register.
6799 */
6800IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
6801{
6802 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6803 Assert(iStReg < 8);
6804
6805 /*
6806 * Raise exceptions.
6807 */
6808 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6809 return iemRaiseDeviceNotAvailable(pIemCpu);
6810
6811 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6812 uint16_t u16Fsw = pFpuCtx->FSW;
6813 if (u16Fsw & X86_FSW_ES)
6814 return iemRaiseMathFault(pIemCpu);
6815
6816 /*
6817 * Check if any of the register accesses causes #SF + #IA.
6818 */
6819 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
6820 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6821 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
6822 {
6823 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
6824 NOREF(u32Eflags);
6825
6826 pFpuCtx->FSW &= ~X86_FSW_C1;
6827 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
6828 if ( !(u16Fsw & X86_FSW_IE)
6829 || (pFpuCtx->FCW & X86_FCW_IM) )
6830 {
6831 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6832 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6833 }
6834 }
6835 else if (pFpuCtx->FCW & X86_FCW_IM)
6836 {
6837 /* Masked underflow. */
6838 pFpuCtx->FSW &= ~X86_FSW_C1;
6839 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6840 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6841 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
6842 }
6843 else
6844 {
6845 /* Raise underflow - don't touch EFLAGS or TOP. */
6846 pFpuCtx->FSW &= ~X86_FSW_C1;
6847 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6848 fPop = false;
6849 }
6850
6851 /*
6852 * Pop if necessary.
6853 */
6854 if (fPop)
6855 {
6856 pFpuCtx->FTW &= ~RT_BIT(iReg1);
6857 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
6858 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
6859 }
6860
6861 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6862 iemHlpUsedFpu(pIemCpu);
6863 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6864 return VINF_SUCCESS;
6865}
6866
6867/** @} */
6868