VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@60477

Last change on this file since 60477 was 60415, checked in by vboxsync, 9 years ago

IEM: Implemented main characteristics of 8086, 80186 and 80286.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 234.4 KB
1/* $Id: IEMAllCImpl.cpp.h 60415 2016-04-11 08:51:07Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23
24/**
25 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
26 *
27 * @returns Strict VBox status code.
28 *
29 * @param pIemCpu The IEM per CPU data.
30 * @param pCtx The register context.
31 * @param u16Port The port number.
32 * @param cbOperand The operand size.
33 */
34static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
35{
36 /* The TSS bits we're interested in are the same on 386 and AMD64. */
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
38 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
39 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
40 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
41
42 /*
43 * Check the TSS type, 16-bit TSSes don't have any I/O permission bitmap.
44 */
45 Assert(!pCtx->tr.Attr.n.u1DescType);
46 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
47 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
48 {
49 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
50 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
51 return iemRaiseGeneralProtectionFault0(pIemCpu);
52 }
53
54 /*
55 * Read the bitmap offset (may #PF).
56 */
57 uint16_t offBitmap;
58 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offBitmap, UINT8_MAX,
59 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
60 if (rcStrict != VINF_SUCCESS)
61 {
62 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
63 return rcStrict;
64 }
65
66 /*
67 * We need to check the bit range from u16Port to (u16Port + cbOperand - 1).
68 * However, Intel describes the CPU as actually reading two bytes regardless of
69 * whether the bit range crosses a byte boundary. Thus the + 1 in the test below.
70 */
71 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
72 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
73 * for instance, sizeof(X86TSS32). */
74 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
75 {
76 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
77 offFirstBit, pCtx->tr.u32Limit));
78 return iemRaiseGeneralProtectionFault0(pIemCpu);
79 }
80
81 /*
82 * Read the necessary bits.
83 */
84 /** @todo Test the assertion in the Intel manual that the CPU reads two
85 * bytes. The question is how this works wrt #PF and #GP on the
86 * 2nd byte when it's not required. */
87 uint16_t bmBytes = UINT16_MAX;
88 rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
89 if (rcStrict != VINF_SUCCESS)
90 {
91 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
92 return rcStrict;
93 }
94
95 /*
96 * Perform the check.
97 */
98 uint16_t fPortMask = (1 << cbOperand) - 1;
99 bmBytes >>= (u16Port & 7);
100 if (bmBytes & fPortMask)
101 {
102 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
103 u16Port, cbOperand, bmBytes, fPortMask));
104 return iemRaiseGeneralProtectionFault0(pIemCpu);
105 }
106
107 return VINF_SUCCESS;
108}
109
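/**
 * A minimal sketch of the I/O permission bitmap test above, for illustration
 * only: plain integers and a hypothetical pfnReadTssU16 reader stand in for
 * the IEM context and memory fetchers. Access is allowed only if every bit
 * covering ports u16Port..u16Port+cbOperand-1 is clear.
 */
#if 0 /* illustrative sketch, not used */
typedef uint16_t (*PFNSKETCHREADTSSU16)(uint32_t offInTss); /* hypothetical TSS reader */

static bool iemSketchIsIoPortAccessible(PFNSKETCHREADTSSU16 pfnReadTssU16, uint32_t cbTssLimit,
                                        uint16_t u16Port, uint8_t cbOperand)
{
    uint16_t const offIoBitmap = pfnReadTssU16(0x66 /* offIoBitmap in X86TSS32/X86TSS64 */);
    uint32_t const offFirstBit = (uint32_t)u16Port / 8 + offIoBitmap;
    if (offFirstBit + 1 > cbTssLimit)                   /* two bytes are read; both must be within the limit */
        return false;                                   /* -> #GP(0) in the code above */
    uint16_t const bmBytes   = pfnReadTssU16(offFirstBit);
    uint16_t const fPortMask = (uint16_t)((1 << cbOperand) - 1);
    return !((bmBytes >> (u16Port & 7)) & fPortMask);   /* any set bit in the range denies access */
}
#endif /* not used */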
110
111/**
112 * Checks if we are allowed to access the given I/O port, raising the
113 * appropriate exceptions if we aren't (or if the I/O bitmap is not
114 * accessible).
115 *
116 * @returns Strict VBox status code.
117 *
118 * @param pIemCpu The IEM per CPU data.
119 * @param pCtx The register context.
120 * @param u16Port The port number.
121 * @param cbOperand The operand size.
122 */
123DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
124{
125 X86EFLAGS Efl;
126 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
127 if ( (pCtx->cr0 & X86_CR0_PE)
128 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
129 || Efl.Bits.u1VM) )
130 return iemHlpCheckPortIOPermissionBitmap(pIemCpu, pCtx, u16Port, cbOperand);
131 return VINF_SUCCESS;
132}
133
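/**
 * A minimal sketch of the gate condition above, for illustration only: the TSS
 * bitmap lookup is needed exactly when the CPU is in protected mode and either
 * runs in V8086 mode or at a CPL above EFLAGS.IOPL.
 */
#if 0 /* illustrative sketch, not used */
static bool iemSketchNeedsIoBitmapCheck(bool fProtMode /*CR0.PE*/, bool fV86 /*EFLAGS.VM*/,
                                        uint8_t uCpl, uint8_t uIopl /*EFLAGS.IOPL*/)
{
    /* Real mode: IN/OUT are always allowed.  Otherwise consult the TSS bitmap
       when the current privilege level is not covered by IOPL or when in V86 mode. */
    return fProtMode && (uCpl > uIopl || fV86);
}
#endif /* not used */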
134
135#if 0
136/**
137 * Calculates the parity bit.
138 *
139 * @returns true if the bit is set, false if not.
140 * @param u8Result The least significant byte of the result.
141 */
142static bool iemHlpCalcParityFlag(uint8_t u8Result)
143{
144 /*
145 * Parity is set if the number of set bits in the least significant byte
146 * of the result is even.
147 */
148 uint8_t cBits;
149 cBits = u8Result & 1; /* 0 */
150 u8Result >>= 1;
151 cBits += u8Result & 1;
152 u8Result >>= 1;
153 cBits += u8Result & 1;
154 u8Result >>= 1;
155 cBits += u8Result & 1;
156 u8Result >>= 1;
157 cBits += u8Result & 1; /* 4 */
158 u8Result >>= 1;
159 cBits += u8Result & 1;
160 u8Result >>= 1;
161 cBits += u8Result & 1;
162 u8Result >>= 1;
163 cBits += u8Result & 1;
164 return !(cBits & 1);
165}
166#endif /* not used */
167
168
169/**
170 * Updates the specified flags according to an 8-bit result.
171 *
172 * @param pIemCpu The IEM state of the calling EMT.
173 * @param u8Result The result to set the flags according to.
174 * @param fToUpdate The flags to update.
175 * @param fUndefined The flags that are specified as undefined.
176 */
177static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
178{
179 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
180
181 uint32_t fEFlags = pCtx->eflags.u;
182 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
183 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
184 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
185#ifdef IEM_VERIFICATION_MODE_FULL
186 pIemCpu->fUndefinedEFlags |= fUndefined;
187#endif
188}
189
190
191/**
192 * Helper used by iret.
193 *
194 * @param uCpl The new CPL.
195 * @param pSReg Pointer to the segment register.
196 */
197static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
198{
199#ifdef VBOX_WITH_RAW_MODE_NOT_R0
200 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
201 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
202#else
203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
204#endif
205
206 if ( uCpl > pSReg->Attr.n.u2Dpl
207 && pSReg->Attr.n.u1DescType /* code or data, not system */
208 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
210 iemHlpLoadNullDataSelectorProt(pIemCpu, pSReg, 0);
211}
212
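/**
 * A minimal sketch of the condition used above, for illustration only: a
 * selector is zapped on privilege change unless it refers to a conforming code
 * segment; system descriptors are not touched by this helper.
 */
#if 0 /* illustrative sketch, not used */
static bool iemSketchMustNullSelectorOnCplChange(uint8_t uNewCpl, uint8_t uDpl,
                                                 bool fCodeOrData /*u1DescType*/, uint8_t u4Type)
{
    bool const fConformingCode = (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
                              ==           (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF);
    return uNewCpl > uDpl && fCodeOrData && !fConformingCode;
}
#endif /* not used */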
213
214/**
215 * Indicates that we have modified the FPU state.
216 *
217 * @param pIemCpu The IEM state of the calling EMT.
218 */
219DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
220{
221 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
222}
223
224/** @} */
225
226/** @name C Implementations
227 * @{
228 */
229
230/**
231 * Implements a 16-bit popa.
232 */
233IEM_CIMPL_DEF_0(iemCImpl_popa_16)
234{
235 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
236 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
237 RTGCPTR GCPtrLast = GCPtrStart + 15;
238 VBOXSTRICTRC rcStrict;
239
240 /*
241 * The docs are a bit hard to comprehend here, but it looks like we wrap
242 * around in real mode as long as none of the individual "popa" crosses the
243 * end of the stack segment. In protected mode we check the whole access
244 * in one go. For efficiency, only do the word-by-word thing if we're in
245 * danger of wrapping around.
246 */
247 /** @todo do popa boundary / wrap-around checks. */
248 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
249 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
250 {
251 /* word-by-word */
252 RTUINT64U TmpRsp;
253 TmpRsp.u = pCtx->rsp;
254 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
255 if (rcStrict == VINF_SUCCESS)
256 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
257 if (rcStrict == VINF_SUCCESS)
258 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 {
261 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
262 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
263 }
264 if (rcStrict == VINF_SUCCESS)
265 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
266 if (rcStrict == VINF_SUCCESS)
267 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 {
272 pCtx->rsp = TmpRsp.u;
273 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
274 }
275 }
276 else
277 {
278 uint16_t const *pa16Mem = NULL;
279 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
280 if (rcStrict == VINF_SUCCESS)
281 {
282 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
283 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
284 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
285 /* skip sp */
286 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
287 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
288 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
289 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
290 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 iemRegAddToRsp(pIemCpu, pCtx, 16);
294 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
295 }
296 }
297 }
298 return rcStrict;
299}
300
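/**
 * A minimal sketch of the memory image the fast path above reads in one go,
 * for illustration only. PUSHA stores AX,CX,DX,BX,SP,BP,SI,DI from high to low
 * addresses, so at the mapped pointer DI sits at index 0 and AX at index 7;
 * the saved SP (index 3, i.e. 7 - X86_GREG_xSP) is discarded by POPA.
 */
#if 0 /* illustrative sketch, not used */
static void iemSketchPopa16(uint16_t const pa16Stack[8], uint16_t *pDi, uint16_t *pSi, uint16_t *pBp,
                            uint16_t *pBx, uint16_t *pDx, uint16_t *pCx, uint16_t *pAx)
{
    *pDi = pa16Stack[0];    /* 7 - X86_GREG_xDI (xDI == 7) */
    *pSi = pa16Stack[1];    /* 7 - X86_GREG_xSI (xSI == 6) */
    *pBp = pa16Stack[2];    /* 7 - X86_GREG_xBP (xBP == 5) */
    /* pa16Stack[3] holds the pushed SP value; POPA skips it. */
    *pBx = pa16Stack[4];    /* 7 - X86_GREG_xBX (xBX == 3) */
    *pDx = pa16Stack[5];    /* 7 - X86_GREG_xDX (xDX == 2) */
    *pCx = pa16Stack[6];    /* 7 - X86_GREG_xCX (xCX == 1) */
    *pAx = pa16Stack[7];    /* 7 - X86_GREG_xAX (xAX == 0) */
}
#endif /* not used */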
301
302/**
303 * Implements a 32-bit popa.
304 */
305IEM_CIMPL_DEF_0(iemCImpl_popa_32)
306{
307 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
308 RTGCPTR GCPtrStart = iemRegGetEffRsp(pIemCpu, pCtx);
309 RTGCPTR GCPtrLast = GCPtrStart + 31;
310 VBOXSTRICTRC rcStrict;
311
312 /*
313 * The docs are a bit hard to comprehend here, but it looks like we wrap
314 * around in real mode as long as none of the individual "popa" crosses the
315 * end of the stack segment. In protected mode we check the whole access
316 * in one go. For efficiency, only do the word-by-word thing if we're in
317 * danger of wrapping around.
318 */
319 /** @todo do popa boundary / wrap-around checks. */
320 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
321 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
322 {
323 /* word-by-word */
324 RTUINT64U TmpRsp;
325 TmpRsp.u = pCtx->rsp;
326 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 {
333 iemRegAddToRspEx(pIemCpu, pCtx, &TmpRsp, 2); /* sp */
334 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
335 }
336 if (rcStrict == VINF_SUCCESS)
337 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
338 if (rcStrict == VINF_SUCCESS)
339 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 {
344#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
345 pCtx->rdi &= UINT32_MAX;
346 pCtx->rsi &= UINT32_MAX;
347 pCtx->rbp &= UINT32_MAX;
348 pCtx->rbx &= UINT32_MAX;
349 pCtx->rdx &= UINT32_MAX;
350 pCtx->rcx &= UINT32_MAX;
351 pCtx->rax &= UINT32_MAX;
352#endif
353 pCtx->rsp = TmpRsp.u;
354 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
355 }
356 }
357 else
358 {
359 uint32_t const *pa32Mem;
360 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
361 if (rcStrict == VINF_SUCCESS)
362 {
363 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
364 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
365 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
366 /* skip esp */
367 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
368 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
369 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
370 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
371 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
372 if (rcStrict == VINF_SUCCESS)
373 {
374 iemRegAddToRsp(pIemCpu, pCtx, 32);
375 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
376 }
377 }
378 }
379 return rcStrict;
380}
381
382
383/**
384 * Implements a 16-bit pusha.
385 */
386IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
387{
388 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
389 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
390 RTGCPTR GCPtrBottom = GCPtrTop - 15;
391 VBOXSTRICTRC rcStrict;
392
393 /*
394 * The docs are a bit hard to comprehend here, but it looks like we wrap
395 * around in real mode as long as none of the individual "pusha" crosses the
396 * end of the stack segment. In protected mode we check the whole access
397 * in one go. For efficiency, only do the word-by-word thing if we're in
398 * danger of wrapping around.
399 */
400 /** @todo do pusha boundary / wrap-around checks. */
401 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
402 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
403 {
404 /* word-by-word */
405 RTUINT64U TmpRsp;
406 TmpRsp.u = pCtx->rsp;
407 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
408 if (rcStrict == VINF_SUCCESS)
409 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 {
424 pCtx->rsp = TmpRsp.u;
425 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
426 }
427 }
428 else
429 {
430 GCPtrBottom--;
431 uint16_t *pa16Mem = NULL;
432 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
433 if (rcStrict == VINF_SUCCESS)
434 {
435 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
436 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
437 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
438 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
439 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
440 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
441 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
442 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
443 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 iemRegSubFromRsp(pIemCpu, pCtx, 16);
447 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
448 }
449 }
450 }
451 return rcStrict;
452}
453
454
455/**
456 * Implements a 32-bit pusha.
457 */
458IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
459{
460 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
461 RTGCPTR GCPtrTop = iemRegGetEffRsp(pIemCpu, pCtx);
462 RTGCPTR GCPtrBottom = GCPtrTop - 31;
463 VBOXSTRICTRC rcStrict;
464
465 /*
466 * The docs are a bit hard to comprehend here, but it looks like we wrap
467 * around in real mode as long as none of the individual "pusha" crosses the
468 * end of the stack segment. In protected mode we check the whole access
469 * in one go. For efficiency, only do the word-by-word thing if we're in
470 * danger of wrapping around.
471 */
472 /** @todo do pusha boundary / wrap-around checks. */
473 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
474 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
475 {
476 /* word-by-word */
477 RTUINT64U TmpRsp;
478 TmpRsp.u = pCtx->rsp;
479 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
480 if (rcStrict == VINF_SUCCESS)
481 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
482 if (rcStrict == VINF_SUCCESS)
483 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 {
496 pCtx->rsp = TmpRsp.u;
497 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
498 }
499 }
500 else
501 {
502 GCPtrBottom--;
503 uint32_t *pa32Mem;
504 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
505 if (rcStrict == VINF_SUCCESS)
506 {
507 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
508 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
509 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
510 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
511 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
512 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
513 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
514 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
515 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 iemRegSubFromRsp(pIemCpu, pCtx, 32);
519 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
520 }
521 }
522 }
523 return rcStrict;
524}
525
526
527/**
528 * Implements pushf.
529 *
530 *
531 * @param enmEffOpSize The effective operand size.
532 */
533IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
534{
535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
536
537 /*
538 * If we're in V8086 mode some care is required (which is why we're
539 * doing this in a C implementation).
540 */
541 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
542 if ( (fEfl & X86_EFL_VM)
543 && X86_EFL_GET_IOPL(fEfl) != 3 )
544 {
545 Assert(pCtx->cr0 & X86_CR0_PE);
546 if ( enmEffOpSize != IEMMODE_16BIT
547 || !(pCtx->cr4 & X86_CR4_VME))
548 return iemRaiseGeneralProtectionFault0(pIemCpu);
549 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
550 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
551 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
552 }
553
554 /*
555 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
556 */
557 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
558
559 VBOXSTRICTRC rcStrict;
560 switch (enmEffOpSize)
561 {
562 case IEMMODE_16BIT:
563#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
564 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
565 if (pIemCpu->uTargetCpu <= IEMTARGETCPU_186)
566 fEfl |= UINT16_C(0xf000);
567#endif
568 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
569 break;
570 case IEMMODE_32BIT:
571 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
572 break;
573 case IEMMODE_64BIT:
574 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
575 break;
576 IEM_NOT_REACHED_DEFAULT_CASE_RET();
577 }
578 if (rcStrict != VINF_SUCCESS)
579 return rcStrict;
580
581 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
582 return VINF_SUCCESS;
583}
584
585
586/**
587 * Implements popf.
588 *
589 * @param enmEffOpSize The effective operand size.
590 */
591IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
592{
593 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
594 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
595 VBOXSTRICTRC rcStrict;
596 uint32_t fEflNew;
597
598 /*
599 * V8086 is special as usual.
600 */
601 if (fEflOld & X86_EFL_VM)
602 {
603 /*
604 * Almost anything goes if IOPL is 3.
605 */
606 if (X86_EFL_GET_IOPL(fEflOld) == 3)
607 {
608 switch (enmEffOpSize)
609 {
610 case IEMMODE_16BIT:
611 {
612 uint16_t u16Value;
613 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
614 if (rcStrict != VINF_SUCCESS)
615 return rcStrict;
616 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
617 break;
618 }
619 case IEMMODE_32BIT:
620 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
621 if (rcStrict != VINF_SUCCESS)
622 return rcStrict;
623 break;
624 IEM_NOT_REACHED_DEFAULT_CASE_RET();
625 }
626
627 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
628 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
629 }
630 /*
631 * Interrupt flag virtualization with CR4.VME=1.
632 */
633 else if ( enmEffOpSize == IEMMODE_16BIT
634 && (pCtx->cr4 & X86_CR4_VME) )
635 {
636 uint16_t u16Value;
637 RTUINT64U TmpRsp;
638 TmpRsp.u = pCtx->rsp;
639 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
640 if (rcStrict != VINF_SUCCESS)
641 return rcStrict;
642
643 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
644 * or before? */
645 if ( ( (u16Value & X86_EFL_IF)
646 && (fEflOld & X86_EFL_VIP))
647 || (u16Value & X86_EFL_TF) )
648 return iemRaiseGeneralProtectionFault0(pIemCpu);
649
650 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
651 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
652 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
653 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
654
655 pCtx->rsp = TmpRsp.u;
656 }
657 else
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 }
661 /*
662 * Not in V8086 mode.
663 */
664 else
665 {
666 /* Pop the flags. */
667 switch (enmEffOpSize)
668 {
669 case IEMMODE_16BIT:
670 {
671 uint16_t u16Value;
672 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
673 if (rcStrict != VINF_SUCCESS)
674 return rcStrict;
675 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
676
677#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
678 /*
679 * Ancient CPU adjustments:
680 * - 8086, 80186, V20/30:
681 * The fixed bits 15:12 are not kept correctly internally, mostly for
682 * practical reasons (masking below). We add them when pushing flags.
683 * - 80286:
684 * The NT and IOPL flags cannot be popped from real mode and are
685 * therefore always zero (since a 286 can never exit from PM and
686 * their initial value is zero). This changed on a 386 and can
687 * therefore be used to detect 286 or 386 CPU in real mode.
688 */
689 if ( pIemCpu->uTargetCpu == IEMTARGETCPU_286
690 && !(pCtx->cr0 & X86_CR0_PE) )
691 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
692#endif
693 break;
694 }
695 case IEMMODE_32BIT:
696 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
697 if (rcStrict != VINF_SUCCESS)
698 return rcStrict;
699 break;
700 case IEMMODE_64BIT:
701 {
702 uint64_t u64Value;
703 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
704 if (rcStrict != VINF_SUCCESS)
705 return rcStrict;
706 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
707 break;
708 }
709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
710 }
711
712 /* Merge them with the current flags. */
713 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
714 || pIemCpu->uCpl == 0)
715 {
716 fEflNew &= X86_EFL_POPF_BITS;
717 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
718 }
719 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
720 {
721 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
722 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
723 }
724 else
725 {
726 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
727 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
728 }
729 }
730
731 /*
732 * Commit the flags.
733 */
734 Assert(fEflNew & RT_BIT_32(1));
735 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
736 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
737
738 return VINF_SUCCESS;
739}
740
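/**
 * A minimal sketch, for illustration only, of how the ancient-CPU quirks
 * emulated by pushf/popf above are exploited by classic real-mode CPU
 * detection code: fAfterClear15_12 is the FLAGS image read back (POPF+PUSHF)
 * after attempting to clear bits 15:12, fAfterSetIoplNt the image read back
 * after attempting to set IOPL and NT.
 */
#if 0 /* illustrative sketch, not used */
static const char *iemSketchClassifyRealModeCpu(uint16_t fAfterClear15_12, uint16_t fAfterSetIoplNt)
{
    if ((fAfterClear15_12 & UINT16_C(0xf000)) == UINT16_C(0xf000))
        return "8086/80186 class";              /* bits 15:12 read back as ones (see pushf above) */
    if (!(fAfterSetIoplNt & (X86_EFL_IOPL | X86_EFL_NT)))
        return "80286 in real mode";            /* IOPL and NT cannot be set (see popf above) */
    return "80386 or later";
}
#endif /* not used */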
741
742/**
743 * Implements a 16-bit indirect call.
744 *
745 * @param uNewPC The new program counter (RIP) value (loaded from the
746 * operand).
748 */
749IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
750{
751 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
752 uint16_t uOldPC = pCtx->ip + cbInstr;
753 if (uNewPC > pCtx->cs.u32Limit)
754 return iemRaiseGeneralProtectionFault0(pIemCpu);
755
756 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
757 if (rcStrict != VINF_SUCCESS)
758 return rcStrict;
759
760 pCtx->rip = uNewPC;
761 pCtx->eflags.Bits.u1RF = 0;
762 return VINF_SUCCESS;
763}
764
765
766/**
767 * Implements a 16-bit relative call.
768 *
769 * @param offDisp The displacement offset.
770 */
771IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
772{
773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
774 uint16_t uOldPC = pCtx->ip + cbInstr;
775 uint16_t uNewPC = uOldPC + offDisp;
776 if (uNewPC > pCtx->cs.u32Limit)
777 return iemRaiseGeneralProtectionFault0(pIemCpu);
778
779 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
780 if (rcStrict != VINF_SUCCESS)
781 return rcStrict;
782
783 pCtx->rip = uNewPC;
784 pCtx->eflags.Bits.u1RF = 0;
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Implements a 32-bit indirect call.
791 *
792 * @param uNewPC The new program counter (RIP) value (loaded from the
793 * operand).
795 */
796IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
797{
798 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
799 uint32_t uOldPC = pCtx->eip + cbInstr;
800 if (uNewPC > pCtx->cs.u32Limit)
801 return iemRaiseGeneralProtectionFault0(pIemCpu);
802
803 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
804 if (rcStrict != VINF_SUCCESS)
805 return rcStrict;
806
807#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
808 /*
809 * CSAM hook for recording interesting indirect calls.
810 */
811 if ( !pCtx->eflags.Bits.u1IF
812 && (pCtx->cr0 & X86_CR0_PG)
813 && !CSAMIsEnabled(IEMCPU_TO_VM(pIemCpu))
814 && pIemCpu->uCpl == 0)
815 {
816 EMSTATE enmState = EMGetState(IEMCPU_TO_VMCPU(pIemCpu));
817 if ( enmState == EMSTATE_IEM_THEN_REM
818 || enmState == EMSTATE_IEM
819 || enmState == EMSTATE_REM)
820 CSAMR3RecordCallAddress(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
821 }
822#endif
823
824 pCtx->rip = uNewPC;
825 pCtx->eflags.Bits.u1RF = 0;
826 return VINF_SUCCESS;
827}
828
829
830/**
831 * Implements a 32-bit relative call.
832 *
833 * @param offDisp The displacement offset.
834 */
835IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
836{
837 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
838 uint32_t uOldPC = pCtx->eip + cbInstr;
839 uint32_t uNewPC = uOldPC + offDisp;
840 if (uNewPC > pCtx->cs.u32Limit)
841 return iemRaiseGeneralProtectionFault0(pIemCpu);
842
843 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
844 if (rcStrict != VINF_SUCCESS)
845 return rcStrict;
846
847 pCtx->rip = uNewPC;
848 pCtx->eflags.Bits.u1RF = 0;
849 return VINF_SUCCESS;
850}
851
852
853/**
854 * Implements a 64-bit indirect call.
855 *
856 * @param uNewPC The new program counter (RIP) value (loaded from the
857 * operand).
859 */
860IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
861{
862 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
863 uint64_t uOldPC = pCtx->rip + cbInstr;
864 if (!IEM_IS_CANONICAL(uNewPC))
865 return iemRaiseGeneralProtectionFault0(pIemCpu);
866
867 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
868 if (rcStrict != VINF_SUCCESS)
869 return rcStrict;
870
871 pCtx->rip = uNewPC;
872 pCtx->eflags.Bits.u1RF = 0;
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Implements a 64-bit relative call.
879 *
880 * @param offDisp The displacement offset.
881 */
882IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
883{
884 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
885 uint64_t uOldPC = pCtx->rip + cbInstr;
886 uint64_t uNewPC = uOldPC + offDisp;
887 if (!IEM_IS_CANONICAL(uNewPC))
888 return iemRaiseNotCanonical(pIemCpu);
889
890 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
891 if (rcStrict != VINF_SUCCESS)
892 return rcStrict;
893
894 pCtx->rip = uNewPC;
895 pCtx->eflags.Bits.u1RF = 0;
896 return VINF_SUCCESS;
897}
898
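/**
 * A minimal sketch, for illustration only, of what a canonical-address check
 * like IEM_IS_CANONICAL amounts to for the 48-bit virtual addresses assumed
 * here: bits 63:47 must all equal bit 47.
 */
#if 0 /* illustrative sketch, not used */
static bool iemSketchIsCanonical48(uint64_t uAddr)
{
    /* Sign-extending bit 47 over the top 16 bits must reproduce the address. */
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}
#endif /* not used */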
899
900/**
901 * Implements far jumps and calls thru task segments (TSS).
902 *
903 * @param uSel The selector.
904 * @param enmBranch The kind of branching we're performing.
905 * @param enmEffOpSize The effective operand size.
906 * @param pDesc The descriptor corresponding to @a uSel. The type is
907 * task segment.
908 */
909IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
910{
911#ifndef IEM_IMPLEMENTS_TASKSWITCH
912 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
913#else
914 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
915 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
916 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
917
918 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
919 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
920 {
921 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
922 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
923 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
924 }
925
926 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
927 * far calls (see iemCImpl_callf). Most likely in both cases it should be
928 * checked here, need testcases. */
929 if (!pDesc->Legacy.Gen.u1Present)
930 {
931 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
932 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
933 }
934
935 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
936 uint32_t uNextEip = pCtx->eip + cbInstr;
937 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
938 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
939#endif
940}
941
942
943/**
944 * Implements far jumps and calls thru task gates.
945 *
946 * @param uSel The selector.
947 * @param enmBranch The kind of branching we're performing.
948 * @param enmEffOpSize The effective operand size.
949 * @param pDesc The descriptor corresponding to @a uSel. The type is
950 * task gate.
951 */
952IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
953{
954#ifndef IEM_IMPLEMENTS_TASKSWITCH
955 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
956#else
957 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
958
959 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
960 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
961 {
962 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
963 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
965 }
966
967 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
968 * far calls (see iemCImpl_callf). Most likely in both cases it should be
969 * checked here, need testcases. */
970 if (!pDesc->Legacy.Gen.u1Present)
971 {
972 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
973 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
974 }
975
976 /*
977 * Fetch the new TSS descriptor from the GDT.
978 */
979 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
980 if (uSelTss & X86_SEL_LDT)
981 {
982 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
983 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
984 }
985
986 IEMSELDESC TssDesc;
987 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelTss, X86_XCPT_GP);
988 if (rcStrict != VINF_SUCCESS)
989 return rcStrict;
990
991 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
992 {
993 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
994 TssDesc.Legacy.Gate.u4Type));
995 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel & X86_SEL_MASK_OFF_RPL);
996 }
997
998 if (!TssDesc.Legacy.Gate.u1Present)
999 {
1000 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1001 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1002 }
1003
1004 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1005 uint32_t uNextEip = pCtx->eip + cbInstr;
1006 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1007 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1008#endif
1009}
1010
1011
1012/**
1013 * Implements far jumps and calls thru call gates.
1014 *
1015 * @param uSel The selector.
1016 * @param enmBranch The kind of branching we're performing.
1017 * @param enmEffOpSize The effective operand size.
1018 * @param pDesc The descriptor corresponding to @a uSel. The type is
1019 * call gate.
1020 */
1021IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1022{
1023#ifndef IEM_IMPLEMENTS_CALLGATE
1024 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1025#else
1026 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1027 * inter-privilege calls and are much more complex.
1028 *
1029 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1030 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1031 * must be 16-bit or 32-bit.
1032 */
1033 /** @todo: effective operand size is probably irrelevant here, only the
1034 * call gate bitness matters??
1035 */
1036 VBOXSTRICTRC rcStrict;
1037 RTPTRUNION uPtrRet;
1038 uint64_t uNewRsp;
1039 uint64_t uNewRip;
1040 uint64_t u64Base;
1041 uint32_t cbLimit;
1042 RTSEL uNewCS;
1043 IEMSELDESC DescCS;
1044 PCPUMCTX pCtx;
1045
1046 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1047 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1048 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1049 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1050
1051 /* Determine the new instruction pointer from the gate descriptor. */
1052 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1053 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1054 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1055
1056 /* Perform DPL checks on the gate descriptor. */
1057 if ( pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
1058 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1059 {
1060 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1061 pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
1062 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1063 }
1064
1065 /** @todo does this catch NULL selectors, too? */
1066 if (!pDesc->Legacy.Gen.u1Present)
1067 {
1068 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1069 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1070 }
1071
1072 /*
1073 * Fetch the target CS descriptor from the GDT or LDT.
1074 */
1075 uNewCS = pDesc->Legacy.Gate.u16Sel;
1076 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_GP);
1077 if (rcStrict != VINF_SUCCESS)
1078 return rcStrict;
1079
1080 /* Target CS must be a code selector. */
1081 if ( !DescCS.Legacy.Gen.u1DescType
1082 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1083 {
1084 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1085 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1086 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1087 }
1088
1089 /* Privilege checks on target CS. */
1090 if (enmBranch == IEMBRANCH_JUMP)
1091 {
1092 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1093 {
1094 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1095 {
1096 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1097 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1098 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1099 }
1100 }
1101 else
1102 {
1103 if (DescCS.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
1104 {
1105 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1106 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1107 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1108 }
1109 }
1110 }
1111 else
1112 {
1113 Assert(enmBranch == IEMBRANCH_CALL);
1114 if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
1115 {
1116 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1117 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1118 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1119 }
1120 }
1121
1122 /* Additional long mode checks. */
1123 if (IEM_IS_LONG_MODE(pIemCpu))
1124 {
1125 if (!DescCS.Legacy.Gen.u1Long)
1126 {
1127 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1128 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1129 }
1130
1131 /* L vs D. */
1132 if ( DescCS.Legacy.Gen.u1Long
1133 && DescCS.Legacy.Gen.u1DefBig)
1134 {
1135 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1136 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
1137 }
1138 }
1139
1140 if (!DescCS.Legacy.Gate.u1Present)
1141 {
1142 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1143 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
1144 }
1145
1146 pCtx = pIemCpu->CTX_SUFF(pCtx);
1147
1148 if (enmBranch == IEMBRANCH_JUMP)
1149 {
1150 /** @todo: This is very similar to regular far jumps; merge! */
1151 /* Jumps are fairly simple... */
1152
1153 /* Chop the high bits off if 16-bit gate (Intel says so). */
1154 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1155 uNewRip = (uint16_t)uNewRip;
1156
1157 /* Limit check for non-long segments. */
1158 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1159 if (DescCS.Legacy.Gen.u1Long)
1160 u64Base = 0;
1161 else
1162 {
1163 if (uNewRip > cbLimit)
1164 {
1165 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1166 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1167 }
1168 u64Base = X86DESC_BASE(&DescCS.Legacy);
1169 }
1170
1171 /* Canonical address check. */
1172 if (!IEM_IS_CANONICAL(uNewRip))
1173 {
1174 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1175 return iemRaiseNotCanonical(pIemCpu);
1176 }
1177
1178 /*
1179 * Ok, everything checked out fine. Now set the accessed bit before
1180 * committing the result into CS, CSHID and RIP.
1181 */
1182 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1183 {
1184 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1185 if (rcStrict != VINF_SUCCESS)
1186 return rcStrict;
1187 /** @todo check what VT-x and AMD-V does. */
1188 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1189 }
1190
1191 /* commit */
1192 pCtx->rip = uNewRip;
1193 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1194 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1195 pCtx->cs.ValidSel = pCtx->cs.Sel;
1196 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1197 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1198 pCtx->cs.u32Limit = cbLimit;
1199 pCtx->cs.u64Base = u64Base;
1200 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1201 }
1202 else
1203 {
1204 Assert(enmBranch == IEMBRANCH_CALL);
1205 /* Calls are much more complicated. */
1206
1207 if (DescCS.Legacy.Gen.u2Dpl < pIemCpu->uCpl)
1208 {
1209 uint16_t offNewStack; /* Offset of new stack in TSS. */
1210 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1211 uint8_t uNewCSDpl;
1212 uint8_t cbWords;
1213 RTSEL uNewSS;
1214 RTSEL uOldSS;
1215 uint64_t uOldRsp;
1216 IEMSELDESC DescSS;
1217 RTPTRUNION uPtrTSS;
1218 RTGCPTR GCPtrTSS;
1219 RTPTRUNION uPtrParmWds;
1220 RTGCPTR GCPtrParmWds;
1221
1222 /* More privilege. This is the fun part. */
1223 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1224
1225 /*
1226 * Determine new SS:rSP from the TSS.
1227 */
1228 Assert(!pCtx->tr.Attr.n.u1DescType);
1229
1230 /* Figure out where the new stack pointer is stored in the TSS. */
1231 uNewCSDpl = uNewCS & X86_SEL_RPL;
1232 if (!IEM_IS_LONG_MODE(pIemCpu))
1233 {
1234 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1235 {
1236 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1237 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1238 }
1239 else
1240 {
1241 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1242 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1243 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1244 }
1245 }
1246 else
1247 {
1248 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1249 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1250 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1251 }
1252
1253 /* Check against TSS limit. */
1254 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1255 {
1256 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1257 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, pCtx->tr.Sel);
1258 }
1259
1260 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1261 rcStrict = iemMemMap(pIemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1262 if (rcStrict != VINF_SUCCESS)
1263 {
1264 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1265 return rcStrict;
1266 }
1267
1268 if (!IEM_IS_LONG_MODE(pIemCpu))
1269 {
1270 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1271 {
1272 uNewRsp = uPtrTSS.pu32[0];
1273 uNewSS = uPtrTSS.pu16[2];
1274 }
1275 else
1276 {
1277 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1278 uNewRsp = uPtrTSS.pu16[0];
1279 uNewSS = uPtrTSS.pu16[1];
1280 }
1281 }
1282 else
1283 {
1284 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1285 /* SS will be a NULL selector, but that's valid. */
1286 uNewRsp = uPtrTSS.pu64[0];
1287 uNewSS = uNewCSDpl;
1288 }
1289
1290 /* Done with the TSS now. */
1291 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1292 if (rcStrict != VINF_SUCCESS)
1293 {
1294 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1295 return rcStrict;
1296 }
1297
1298 /* Only used outside of long mode. */
1299 cbWords = pDesc->Legacy.Gate.u4ParmCount;
1300
1301 /* If EFER.LMA is 0, there's extra work to do. */
1302 if (!IEM_IS_LONG_MODE(pIemCpu))
1303 {
1304 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1305 {
1306 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1307 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1308 }
1309
1310 /* Grab the new SS descriptor. */
1311 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1312 if (rcStrict != VINF_SUCCESS)
1313 return rcStrict;
1314
1315 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1316 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1317 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1318 {
1319 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1320 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1321 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1322 }
1323
1324 /* Ensure new SS is a writable data segment. */
1325 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1326 {
1327 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1328 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
1329 }
1330
1331 if (!DescSS.Legacy.Gen.u1Present)
1332 {
1333 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1334 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
1335 }
1336 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1337 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1338 else
1339 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1340 }
1341 else
1342 {
1343 /* Just grab the new (NULL) SS descriptor. */
1344 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
1345 if (rcStrict != VINF_SUCCESS)
1346 return rcStrict;
1347
1348 cbNewStack = sizeof(uint64_t) * 4;
1349 }
1350
1351 /** @todo: According to Intel, new stack is checked for enough space first,
1352 * then switched. According to AMD, the stack is switched first and
1353 * then pushes might fault!
1354 */
1355
1356 /** @todo: According to AMD, CS is loaded first, then SS.
1357 * According to Intel, it's the other way around!?
1358 */
1359
1360 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1361
1362 /* Set the accessed bit before committing new SS. */
1363 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1364 {
1365 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
1366 if (rcStrict != VINF_SUCCESS)
1367 return rcStrict;
1368 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1369 }
1370
1371 /* Remember the old SS:rSP and their linear address. */
1372 uOldSS = pCtx->ss.Sel;
1373 uOldRsp = pCtx->rsp;
1374
1375 GCPtrParmWds = pCtx->ss.u64Base + pCtx->rsp;
1376
1377 /* Commit new SS:rSP. */
1378 pCtx->ss.Sel = uNewSS;
1379 pCtx->ss.ValidSel = uNewSS;
1380 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1381 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1382 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1383 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1384 pCtx->rsp = uNewRsp;
1385 pIemCpu->uCpl = uNewCSDpl;
1386 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
1387 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
1388
1389 /* Check new stack - may #SS(NewSS). */
1390 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbNewStack,
1391 &uPtrRet.pv, &uNewRsp);
1392 if (rcStrict != VINF_SUCCESS)
1393 {
1394 Log(("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1395 return rcStrict;
1396 }
1397
1398 if (!IEM_IS_LONG_MODE(pIemCpu))
1399 {
1400 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1401 {
1402 /* Push the old CS:rIP. */
1403 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1404 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1405
1406 /* Map the relevant chunk of the old stack. */
1407 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1408 if (rcStrict != VINF_SUCCESS)
1409 {
1410 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1411 return rcStrict;
1412 }
1413
1414 /* Copy the parameter (d)words. */
1415 for (int i = 0; i < cbWords; ++i)
1416 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1417
1418 /* Unmap the old stack. */
1419 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1420 if (rcStrict != VINF_SUCCESS)
1421 {
1422 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1423 return rcStrict;
1424 }
1425
1426 /* Push the old SS:rSP. */
1427 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1428 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1429 }
1430 else
1431 {
1432 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1433
1434 /* Push the old CS:rIP. */
1435 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1436 uPtrRet.pu16[1] = pCtx->cs.Sel;
1437
1438 /* Map the relevant chunk of the old stack. */
1439 rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1440 if (rcStrict != VINF_SUCCESS)
1441 {
1442 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1443 return rcStrict;
1444 }
1445
1446 /* Copy the parameter words. */
1447 for (int i = 0; i < cbWords; ++i)
1448 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1449
1450 /* Unmap the old stack. */
1451 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1452 if (rcStrict != VINF_SUCCESS)
1453 {
1454 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1455 return rcStrict;
1456 }
1457
1458 /* Push the old SS:rSP. */
1459 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1460 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1461 }
1462 }
1463 else
1464 {
1465 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1466
1467 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1468 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1469 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1470 uPtrRet.pu64[2] = uOldRsp;
1471 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1472 }
1473
1474 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Chop the high bits off if 16-bit gate (Intel says so). */
1482 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1483 uNewRip = (uint16_t)uNewRip;
1484
1485 /* Limit / canonical check. */
1486 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1487 if (!IEM_IS_LONG_MODE(pIemCpu))
1488 {
1489 if (uNewRip > cbLimit)
1490 {
1491 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1492 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1493 }
1494 u64Base = X86DESC_BASE(&DescCS.Legacy);
1495 }
1496 else
1497 {
1498 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1499 if (!IEM_IS_CANONICAL(uNewRip))
1500 {
1501 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1502 return iemRaiseNotCanonical(pIemCpu);
1503 }
1504 u64Base = 0;
1505 }
1506
1507 /*
1508 * Now set the accessed bit before
1509 * writing the return address to the stack and committing the result into
1510 * CS, CSHID and RIP.
1511 */
1512 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1513 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1514 {
1515 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1516 if (rcStrict != VINF_SUCCESS)
1517 return rcStrict;
1518 /** @todo check what VT-x and AMD-V does. */
1519 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1520 }
1521
1522 /* Commit new CS:rIP. */
1523 pCtx->rip = uNewRip;
1524 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1525 pCtx->cs.Sel |= pIemCpu->uCpl;
1526 pCtx->cs.ValidSel = pCtx->cs.Sel;
1527 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1528 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1529 pCtx->cs.u32Limit = cbLimit;
1530 pCtx->cs.u64Base = u64Base;
1531 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1532 }
1533 else
1534 {
1535 /* Same privilege. */
1536 /** @todo: This is very similar to regular far calls; merge! */
1537
1538 /* Check stack first - may #SS(0). */
1539 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1540 * 16-bit code cause a two or four byte CS to be pushed? */
1541 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1542 IEM_IS_LONG_MODE(pIemCpu) ? 8+8
1543 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1544 &uPtrRet.pv, &uNewRsp);
1545 if (rcStrict != VINF_SUCCESS)
1546 return rcStrict;
1547
1548 /* Chop the high bits off if 16-bit gate (Intel says so). */
1549 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1550 uNewRip = (uint16_t)uNewRip;
1551
1552 /* Limit / canonical check. */
1553 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1554 if (!IEM_IS_LONG_MODE(pIemCpu))
1555 {
1556 if (uNewRip > cbLimit)
1557 {
1558 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1559 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
1560 }
1561 u64Base = X86DESC_BASE(&DescCS.Legacy);
1562 }
1563 else
1564 {
1565 if (!IEM_IS_CANONICAL(uNewRip))
1566 {
1567 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1568 return iemRaiseNotCanonical(pIemCpu);
1569 }
1570 u64Base = 0;
1571 }
1572
1573 /*
1574 * Now set the accessed bit before
1575 * writing the return address to the stack and committing the result into
1576 * CS, CSHID and RIP.
1577 */
1578 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1579 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1580 {
1581 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
1582 if (rcStrict != VINF_SUCCESS)
1583 return rcStrict;
1584 /** @todo check what VT-x and AMD-V does. */
1585 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1586 }
1587
1588 /* stack */
1589 if (!IEM_IS_LONG_MODE(pIemCpu))
1590 {
1591 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1592 {
1593 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1594 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1595 }
1596 else
1597 {
1598 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1599 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1600 uPtrRet.pu16[1] = pCtx->cs.Sel;
1601 }
1602 }
1603 else
1604 {
1605 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1606 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1607 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1608 }
1609
1610 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1611 if (rcStrict != VINF_SUCCESS)
1612 return rcStrict;
1613
1614 /* commit */
1615 pCtx->rip = uNewRip;
1616 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1617 pCtx->cs.Sel |= pIemCpu->uCpl;
1618 pCtx->cs.ValidSel = pCtx->cs.Sel;
1619 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1620 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1621 pCtx->cs.u32Limit = cbLimit;
1622 pCtx->cs.u64Base = u64Base;
1623 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1624 }
1625 }
1626 pCtx->eflags.Bits.u1RF = 0;
1627 return VINF_SUCCESS;
1628#endif
1629}
1630
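/*
 * For reference, the frame that the privilege-raising path above builds on the
 * NEW stack for a 32-bit call gate with N parameter dwords (offsets are from
 * the new ESP, i.e. the lowest address):
 *
 *      +0              return EIP (old EIP + cbInstr)
 *      +4              return CS  (old CS selector)
 *      +8 .. +8+4*N-4  N parameter dwords copied from the old SS:ESP
 *      +8+4*N          old ESP
 *      +12+4*N         old SS
 *
 * The 16-bit gate uses word-sized slots in the same order; the long-mode gate
 * pushes only old SS, old RSP, CS and RIP and copies no parameters.
 */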
1631
1632/**
1633 * Implements far jumps and calls thru system selectors.
1634 *
1635 * @param uSel The selector.
1636 * @param enmBranch The kind of branching we're performing.
1637 * @param enmEffOpSize The effective operand size.
1638 * @param pDesc The descriptor corresponding to @a uSel.
1639 */
1640IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1641{
1642 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1643 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1644
1645 if (IEM_IS_LONG_MODE(pIemCpu))
1646 switch (pDesc->Legacy.Gen.u4Type)
1647 {
1648 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1649 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1650
1651 default:
1652 case AMD64_SEL_TYPE_SYS_LDT:
1653 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1654 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1656 case AMD64_SEL_TYPE_SYS_INT_GATE:
1657 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1658 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1659 }
1660
1661 switch (pDesc->Legacy.Gen.u4Type)
1662 {
1663 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1664 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1665 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1666
1667 case X86_SEL_TYPE_SYS_TASK_GATE:
1668 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1669
1670 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1671 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1672 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1673
1674 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1675 Log(("branch %04x -> busy 286 TSS\n", uSel));
1676 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1677
1678 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1679 Log(("branch %04x -> busy 386 TSS\n", uSel));
1680 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1681
1682 default:
1683 case X86_SEL_TYPE_SYS_LDT:
1684 case X86_SEL_TYPE_SYS_286_INT_GATE:
1685 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1686 case X86_SEL_TYPE_SYS_386_INT_GATE:
1687 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1688 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1689 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1690 }
1691}
1692
1693
1694/**
1695 * Implements far jumps.
1696 *
1697 * @param uSel The selector.
1698 * @param offSeg The segment offset.
1699 * @param enmEffOpSize The effective operand size.
1700 */
1701IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1702{
1703 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1704 NOREF(cbInstr);
1705 Assert(offSeg <= UINT32_MAX);
1706
1707 /*
1708 * Real mode and V8086 mode are easy. The only snag seems to be that
1709 * CS.limit doesn't change and the limit check is done against the current
1710 * limit.
1711 */
1712 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1713 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1714 {
1715 if (offSeg > pCtx->cs.u32Limit)
1716 return iemRaiseGeneralProtectionFault0(pIemCpu);
1717
1718 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1719 pCtx->rip = offSeg;
1720 else
1721 pCtx->rip = offSeg & UINT16_MAX;
1722 pCtx->cs.Sel = uSel;
1723 pCtx->cs.ValidSel = uSel;
1724 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1725 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1726 pCtx->eflags.Bits.u1RF = 0;
1727 return VINF_SUCCESS;
1728 }
1729
1730 /*
1731 * Protected mode. Need to parse the specified descriptor...
1732 */
1733 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1734 {
1735 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1736 return iemRaiseGeneralProtectionFault0(pIemCpu);
1737 }
1738
1739 /* Fetch the descriptor. */
1740 IEMSELDESC Desc;
1741 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1742 if (rcStrict != VINF_SUCCESS)
1743 return rcStrict;
1744
1745 /* Is it there? */
1746 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1747 {
1748 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1749 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1750 }
1751
1752 /*
1753 * Deal with it according to its type. We do the standard code selectors
1754 * here and dispatch the system selectors to worker functions.
1755 */
1756 if (!Desc.Legacy.Gen.u1DescType)
1757 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1758
1759 /* Only code segments. */
1760 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1761 {
1762 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1763 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1764 }
1765
1766 /* L vs D. */
1767 if ( Desc.Legacy.Gen.u1Long
1768 && Desc.Legacy.Gen.u1DefBig
1769 && IEM_IS_LONG_MODE(pIemCpu))
1770 {
1771 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1772 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1773 }
1774
1775 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1776 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1777 {
1778 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1779 {
1780 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1781 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1782 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1783 }
1784 }
1785 else
1786 {
1787 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1788 {
1789 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1790 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1791 }
1792 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1793 {
1794 Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1795 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1796 }
1797 }
1798
1799 /* Chop the high bits if 16-bit (Intel says so). */
1800 if (enmEffOpSize == IEMMODE_16BIT)
1801 offSeg &= UINT16_MAX;
1802
1803 /* Limit check. (Should alternatively check for non-canonical addresses
1804 here, but that is ruled out by offSeg being 32-bit, right?) */
1805 uint64_t u64Base;
1806 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1807 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1808 u64Base = 0;
1809 else
1810 {
1811 if (offSeg > cbLimit)
1812 {
1813 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1814 /** @todo: Intel says this is #GP(0)! */
1815 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1816 }
1817 u64Base = X86DESC_BASE(&Desc.Legacy);
1818 }
1819
1820 /*
1821 * Ok, everything checked out fine. Now set the accessed bit before
1822 * committing the result into CS, CSHID and RIP.
1823 */
1824 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1825 {
1826 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1827 if (rcStrict != VINF_SUCCESS)
1828 return rcStrict;
1829 /** @todo check what VT-x and AMD-V does. */
1830 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1831 }
1832
1833 /* commit */
1834 pCtx->rip = offSeg;
1835 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1836 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1837 pCtx->cs.ValidSel = pCtx->cs.Sel;
1838 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1839 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1840 pCtx->cs.u32Limit = cbLimit;
1841 pCtx->cs.u64Base = u64Base;
1842 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
1843 pCtx->eflags.Bits.u1RF = 0;
1844 /** @todo check if the hidden bits are loaded correctly for 64-bit
1845 * mode. */
1846 return VINF_SUCCESS;
1847}
1848
1849
1850/**
1851 * Implements far calls.
1852 *
1853 * This is very similar to iemCImpl_FarJmp.
1854 *
1855 * @param uSel The selector.
1856 * @param offSeg The segment offset.
1857 * @param enmEffOpSize The operand size (in case we need it).
1858 */
1859IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1860{
1861 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1862 VBOXSTRICTRC rcStrict;
1863 uint64_t uNewRsp;
1864 RTPTRUNION uPtrRet;
1865
1866 /*
1867 * Real mode and V8086 mode are easy. The only snag seems to be that
1868 * CS.limit doesn't change and the limit check is done against the current
1869 * limit.
1870 */
1871 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1872 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1873 {
1874 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1875
1876 /* Check stack first - may #SS(0). */
1877 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1878 &uPtrRet.pv, &uNewRsp);
1879 if (rcStrict != VINF_SUCCESS)
1880 return rcStrict;
1881
1882 /* Check the target address range. */
1883 if (offSeg > UINT32_MAX)
1884 return iemRaiseGeneralProtectionFault0(pIemCpu);
1885
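        /* Frame layout for the real/V86-mode far call: the 16-bit form pushes
           IP then CS (2+2 bytes); the 32-bit form pushes EIP (4 bytes) followed
           by a 16-bit CS at offset 4, matching the 6 bytes allocated above. */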
1886 /* Everything is fine, push the return address. */
1887 if (enmEffOpSize == IEMMODE_16BIT)
1888 {
1889 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1890 uPtrRet.pu16[1] = pCtx->cs.Sel;
1891 }
1892 else
1893 {
1894 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1895 uPtrRet.pu16[2] = pCtx->cs.Sel;
1896 }
1897 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1898 if (rcStrict != VINF_SUCCESS)
1899 return rcStrict;
1900
1901 /* Branch. */
1902 pCtx->rip = offSeg;
1903 pCtx->cs.Sel = uSel;
1904 pCtx->cs.ValidSel = uSel;
1905 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1906 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1907 pCtx->eflags.Bits.u1RF = 0;
1908 return VINF_SUCCESS;
1909 }
1910
1911 /*
1912 * Protected mode. Need to parse the specified descriptor...
1913 */
1914 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1915 {
1916 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1917 return iemRaiseGeneralProtectionFault0(pIemCpu);
1918 }
1919
1920 /* Fetch the descriptor. */
1921 IEMSELDESC Desc;
1922 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP);
1923 if (rcStrict != VINF_SUCCESS)
1924 return rcStrict;
1925
1926 /*
1927 * Deal with it according to its type. We do the standard code selectors
1928 * here and dispatch the system selectors to worker functions.
1929 */
1930 if (!Desc.Legacy.Gen.u1DescType)
1931 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1932
1933 /* Only code segments. */
1934 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1935 {
1936 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1937 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1938 }
1939
1940 /* L vs D. */
1941 if ( Desc.Legacy.Gen.u1Long
1942 && Desc.Legacy.Gen.u1DefBig
1943 && IEM_IS_LONG_MODE(pIemCpu))
1944 {
1945 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1946 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1947 }
1948
1949 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1950 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1951 {
1952 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1953 {
1954 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1955 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1956 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1957 }
1958 }
1959 else
1960 {
1961 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1962 {
1963 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1965 }
1966 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1967 {
1968 Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1969 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1970 }
1971 }
1972
1973 /* Is it there? */
1974 if (!Desc.Legacy.Gen.u1Present)
1975 {
1976 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1977 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1978 }
1979
1980 /* Check stack first - may #SS(0). */
1981 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1982 * 16-bit code cause a two or four byte CS to be pushed? */
1983 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1984 enmEffOpSize == IEMMODE_64BIT ? 8+8
1985 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1986 &uPtrRet.pv, &uNewRsp);
1987 if (rcStrict != VINF_SUCCESS)
1988 return rcStrict;
1989
1990 /* Chop the high bits if 16-bit (Intel says so). */
1991 if (enmEffOpSize == IEMMODE_16BIT)
1992 offSeg &= UINT16_MAX;
1993
1994 /* Limit / canonical check. */
1995 uint64_t u64Base;
1996 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1997 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1998 {
1999 if (!IEM_IS_CANONICAL(offSeg))
2000 {
2001 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2002 return iemRaiseNotCanonical(pIemCpu);
2003 }
2004 u64Base = 0;
2005 }
2006 else
2007 {
2008 if (offSeg > cbLimit)
2009 {
2010 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2011 /** @todo: Intel says this is #GP(0)! */
2012 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2013 }
2014 u64Base = X86DESC_BASE(&Desc.Legacy);
2015 }
2016
2017 /*
2018 * Now set the accessed bit before
2019 * writing the return address to the stack and committing the result into
2020 * CS, CSHID and RIP.
2021 */
2022 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2023 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2024 {
2025 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2026 if (rcStrict != VINF_SUCCESS)
2027 return rcStrict;
2028 /** @todo check what VT-x and AMD-V does. */
2029 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2030 }
2031
2032 /* stack */
2033 if (enmEffOpSize == IEMMODE_16BIT)
2034 {
2035 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2036 uPtrRet.pu16[1] = pCtx->cs.Sel;
2037 }
2038 else if (enmEffOpSize == IEMMODE_32BIT)
2039 {
2040 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2041 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2042 }
2043 else
2044 {
2045 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2046 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2047 }
2048 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
2049 if (rcStrict != VINF_SUCCESS)
2050 return rcStrict;
2051
2052 /* commit */
2053 pCtx->rip = offSeg;
2054 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2055 pCtx->cs.Sel |= pIemCpu->uCpl;
2056 pCtx->cs.ValidSel = pCtx->cs.Sel;
2057 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2058 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2059 pCtx->cs.u32Limit = cbLimit;
2060 pCtx->cs.u64Base = u64Base;
2061 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2062 pCtx->eflags.Bits.u1RF = 0;
2063 /** @todo check if the hidden bits are loaded correctly for 64-bit
2064 * mode. */
2065 return VINF_SUCCESS;
2066}
2067
2068
2069/**
2070 * Implements retf.
2071 *
2072 * @param enmEffOpSize The effective operand size.
2073 * @param cbPop The number of argument bytes to pop from
2074 * the stack.
2075 */
2076IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2077{
2078 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2079 VBOXSTRICTRC rcStrict;
2080 RTCPTRUNION uPtrFrame;
2081 uint64_t uNewRsp;
2082 uint64_t uNewRip;
2083 uint16_t uNewCs;
2084 NOREF(cbInstr);
2085
2086 /*
2087 * Read the stack values first.
2088 */
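    /* The near part of the frame is the return RIP followed by CS, each taking
       one operand-size slot; only the low 16 bits of the CS slot are used,
       hence the pu16[2]/pu16[4] indexing below. */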
2089 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2090 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2091 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2092 if (rcStrict != VINF_SUCCESS)
2093 return rcStrict;
2094 if (enmEffOpSize == IEMMODE_16BIT)
2095 {
2096 uNewRip = uPtrFrame.pu16[0];
2097 uNewCs = uPtrFrame.pu16[1];
2098 }
2099 else if (enmEffOpSize == IEMMODE_32BIT)
2100 {
2101 uNewRip = uPtrFrame.pu32[0];
2102 uNewCs = uPtrFrame.pu16[2];
2103 }
2104 else
2105 {
2106 uNewRip = uPtrFrame.pu64[0];
2107 uNewCs = uPtrFrame.pu16[4];
2108 }
2109
2110 /*
2111 * Real mode and V8086 mode are easy.
2112 */
2113 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2114 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2115 {
2116 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2117 /** @todo check how this is supposed to work if sp=0xfffe. */
2118
2119 /* Check the limit of the new EIP. */
2120 /** @todo Intel pseudo code only does the limit check for 16-bit
2121 * operands; AMD does not make any distinction. What is right? */
2122 if (uNewRip > pCtx->cs.u32Limit)
2123 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2124
2125 /* commit the operation. */
2126 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2127 if (rcStrict != VINF_SUCCESS)
2128 return rcStrict;
2129 pCtx->rip = uNewRip;
2130 pCtx->cs.Sel = uNewCs;
2131 pCtx->cs.ValidSel = uNewCs;
2132 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2133 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2134 pCtx->eflags.Bits.u1RF = 0;
2135 /** @todo do we load attribs and limit as well? */
2136 if (cbPop)
2137 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2138 return VINF_SUCCESS;
2139 }
2140
2141 /*
2142 * Protected mode is complicated, of course.
2143 */
2144 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2145 {
2146 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2147 return iemRaiseGeneralProtectionFault0(pIemCpu);
2148 }
2149
2150 /* Fetch the descriptor. */
2151 IEMSELDESC DescCs;
2152 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs, X86_XCPT_GP);
2153 if (rcStrict != VINF_SUCCESS)
2154 return rcStrict;
2155
2156 /* Can only return to a code selector. */
2157 if ( !DescCs.Legacy.Gen.u1DescType
2158 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2159 {
2160 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2161 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2162 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2163 }
2164
2165 /* L vs D. */
2166 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2167 && DescCs.Legacy.Gen.u1DefBig
2168 && IEM_IS_LONG_MODE(pIemCpu))
2169 {
2170 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2171 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2172 }
2173
2174 /* DPL/RPL/CPL checks. */
2175 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2176 {
2177 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
2178 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2179 }
2180
2181 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2182 {
2183 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2184 {
2185 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2186 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2187 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2188 }
2189 }
2190 else
2191 {
2192 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2193 {
2194 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2195 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2196 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2197 }
2198 }
2199
2200 /* Is it there? */
2201 if (!DescCs.Legacy.Gen.u1Present)
2202 {
2203 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2204 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2205 }
2206
2207 /*
2208 * Return to outer privilege? (We'll typically have entered via a call gate.)
2209 */
2210 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2211 {
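        /* The frame being unwound looks like this, lowest address first:
               [return RIP] [return CS] [cbPop parameter bytes] [outer RSP] [outer SS]
           RIP and CS were popped above; here we skip the parameters and fetch
           the outer SS:RSP stored after them. */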
2212 /* Read the outer stack pointer stored *after* the parameters. */
2213 RTCPTRUNION uPtrStack;
2214 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
2215 if (rcStrict != VINF_SUCCESS)
2216 return rcStrict;
2217
2218 uPtrStack.pu8 += cbPop; /* Skip the parameters. */
2219
2220 uint16_t uNewOuterSs;
2221 uint64_t uNewOuterRsp;
2222 if (enmEffOpSize == IEMMODE_16BIT)
2223 {
2224 uNewOuterRsp = uPtrStack.pu16[0];
2225 uNewOuterSs = uPtrStack.pu16[1];
2226 }
2227 else if (enmEffOpSize == IEMMODE_32BIT)
2228 {
2229 uNewOuterRsp = uPtrStack.pu32[0];
2230 uNewOuterSs = uPtrStack.pu16[2];
2231 }
2232 else
2233 {
2234 uNewOuterRsp = uPtrStack.pu64[0];
2235 uNewOuterSs = uPtrStack.pu16[4];
2236 }
2237
2238 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2239 and read the selector. */
2240 IEMSELDESC DescSs;
2241 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2242 {
2243 if ( !DescCs.Legacy.Gen.u1Long
2244 || (uNewOuterSs & X86_SEL_RPL) == 3)
2245 {
2246 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2247 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2248 return iemRaiseGeneralProtectionFault0(pIemCpu);
2249 }
2250 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2251 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2252 }
2253 else
2254 {
2255 /* Fetch the descriptor for the new stack segment. */
2256 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2257 if (rcStrict != VINF_SUCCESS)
2258 return rcStrict;
2259 }
2260
2261 /* Check that RPL of stack and code selectors match. */
2262 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2263 {
2264 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2265 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2266 }
2267
2268 /* Must be a writable data segment. */
2269 if ( !DescSs.Legacy.Gen.u1DescType
2270 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2271 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2272 {
2273 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2274 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2275 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2276 }
2277
2278 /* L vs D. (Not mentioned by intel.) */
2279 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2280 && DescSs.Legacy.Gen.u1DefBig
2281 && IEM_IS_LONG_MODE(pIemCpu))
2282 {
2283 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2284 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2285 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2286 }
2287
2288 /* DPL/RPL/CPL checks. */
2289 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2290 {
2291 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2292 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2293 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
2294 }
2295
2296 /* Is it there? */
2297 if (!DescSs.Legacy.Gen.u1Present)
2298 {
2299 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2300 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
2301 }
2302
2303 /* Calc SS limit. */
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2305
2306 /* Is RIP canonical or within CS.limit? */
2307 uint64_t u64Base;
2308 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2309
2310 /** @todo Testcase: Is this correct? */
2311 if ( DescCs.Legacy.Gen.u1Long
2312 && IEM_IS_LONG_MODE(pIemCpu) )
2313 {
2314 if (!IEM_IS_CANONICAL(uNewRip))
2315 {
2316 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2317 return iemRaiseNotCanonical(pIemCpu);
2318 }
2319 u64Base = 0;
2320 }
2321 else
2322 {
2323 if (uNewRip > cbLimitCs)
2324 {
2325 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2326 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2327 /** @todo: Intel says this is #GP(0)! */
2328 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2329 }
2330 u64Base = X86DESC_BASE(&DescCs.Legacy);
2331 }
2332
2333 /*
2334 * Now set the CS and SS accessed bits before committing the result
2335 * into CS, CSHID, RIP and the outer SS, SSHID and RSP. (Unlike callf,
2336 * no return address is written here.)
2337 */
2338 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2339 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2340 {
2341 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2342 if (rcStrict != VINF_SUCCESS)
2343 return rcStrict;
2344 /** @todo check what VT-x and AMD-V does. */
2345 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2346 }
2347 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2348 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2349 {
2350 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
2351 if (rcStrict != VINF_SUCCESS)
2352 return rcStrict;
2353 /** @todo check what VT-x and AMD-V does. */
2354 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2355 }
2356
2357 /* commit */
2358 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2359 if (rcStrict != VINF_SUCCESS)
2360 return rcStrict;
2361 if (enmEffOpSize == IEMMODE_16BIT)
2362 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2363 else
2364 pCtx->rip = uNewRip;
2365 pCtx->cs.Sel = uNewCs;
2366 pCtx->cs.ValidSel = uNewCs;
2367 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2368 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2369 pCtx->cs.u32Limit = cbLimitCs;
2370 pCtx->cs.u64Base = u64Base;
2371 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2372 pCtx->rsp = uNewOuterRsp;
2373 pCtx->ss.Sel = uNewOuterSs;
2374 pCtx->ss.ValidSel = uNewOuterSs;
2375 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2376 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2377 pCtx->ss.u32Limit = cbLimitSs;
2378 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2379 pCtx->ss.u64Base = 0;
2380 else
2381 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2382
2383 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
2384 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2385 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2386 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2387 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2388
2389 /** @todo check if the hidden bits are loaded correctly for 64-bit
2390 * mode. */
2391
2392 if (cbPop)
2393 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2394 pCtx->eflags.Bits.u1RF = 0;
2395
2396 /* Done! */
2397 }
2398 /*
2399 * Return to the same privilege level
2400 */
2401 else
2402 {
2403 /* Limit / canonical check. */
2404 uint64_t u64Base;
2405 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2406
2407 /** @todo Testcase: Is this correct? */
2408 if ( DescCs.Legacy.Gen.u1Long
2409 && IEM_IS_LONG_MODE(pIemCpu) )
2410 {
2411 if (!IEM_IS_CANONICAL(uNewRip))
2412 {
2413 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2414 return iemRaiseNotCanonical(pIemCpu);
2415 }
2416 u64Base = 0;
2417 }
2418 else
2419 {
2420 if (uNewRip > cbLimitCs)
2421 {
2422 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2423 /** @todo: Intel says this is #GP(0)! */
2424 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2425 }
2426 u64Base = X86DESC_BASE(&DescCs.Legacy);
2427 }
2428
2429 /*
2430 * Now set the accessed bit before committing the result into
2431 * CS, CSHID and RIP. (Unlike callf, no return address is written
2432 * here.)
2433 */
2434 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2435 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2436 {
2437 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2438 if (rcStrict != VINF_SUCCESS)
2439 return rcStrict;
2440 /** @todo check what VT-x and AMD-V does. */
2441 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2442 }
2443
2444 /* commit */
2445 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
2446 if (rcStrict != VINF_SUCCESS)
2447 return rcStrict;
2448 if (enmEffOpSize == IEMMODE_16BIT)
2449 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2450 else
2451 pCtx->rip = uNewRip;
2452 pCtx->cs.Sel = uNewCs;
2453 pCtx->cs.ValidSel = uNewCs;
2454 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2455 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2456 pCtx->cs.u32Limit = cbLimitCs;
2457 pCtx->cs.u64Base = u64Base;
2458 /** @todo check if the hidden bits are loaded correctly for 64-bit
2459 * mode. */
2460 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
2461 if (cbPop)
2462 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2463 pCtx->eflags.Bits.u1RF = 0;
2464 }
2465 return VINF_SUCCESS;
2466}
2467
2468
2469/**
2470 * Implements retn.
2471 *
2472 * We're doing this in C because of the \#GP that might be raised if the popped
2473 * program counter is out of bounds.
2474 *
2475 * @param enmEffOpSize The effective operand size.
2476 * @param cbPop The number of argument bytes to pop from
2477 * the stack.
2478 */
2479IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2480{
2481 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2482 NOREF(cbInstr);
2483
2484 /* Fetch the RSP from the stack. */
2485 VBOXSTRICTRC rcStrict;
2486 RTUINT64U NewRip;
2487 RTUINT64U NewRsp;
2488 NewRsp.u = pCtx->rsp;
2489 switch (enmEffOpSize)
2490 {
2491 case IEMMODE_16BIT:
2492 NewRip.u = 0;
2493 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
2494 break;
2495 case IEMMODE_32BIT:
2496 NewRip.u = 0;
2497 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
2498 break;
2499 case IEMMODE_64BIT:
2500 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
2501 break;
2502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2503 }
2504 if (rcStrict != VINF_SUCCESS)
2505 return rcStrict;
2506
2507 /* Check the new RSP before loading it. */
2508 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2509 * of it. The canonical test is performed here and for call. */
2510 if (enmEffOpSize != IEMMODE_64BIT)
2511 {
2512 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2513 {
2514 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2515 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2516 }
2517 }
2518 else
2519 {
2520 if (!IEM_IS_CANONICAL(NewRip.u))
2521 {
2522 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2523 return iemRaiseNotCanonical(pIemCpu);
2524 }
2525 }
2526
2527 /* Commit it. */
2528 pCtx->rip = NewRip.u;
2529 pCtx->rsp = NewRsp.u;
2530 if (cbPop)
2531 iemRegAddToRsp(pIemCpu, pCtx, cbPop);
2532 pCtx->eflags.Bits.u1RF = 0;
2533
2534 return VINF_SUCCESS;
2535}
2536
2537
2538/**
2539 * Implements enter.
2540 *
2541 * We're doing this in C because the instruction is insane; even for the
2542 * u8NestingLevel=0 case, dealing with the stack is tedious.
2543 *
2544 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate (in bytes).
 * @param cParameters The nesting level (only the low 5 bits are used).
2545 */
2546IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2547{
2548 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2549
2550 /* Push RBP, saving the old value in TmpRbp. */
2551 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2552 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2553 RTUINT64U NewRbp;
2554 VBOXSTRICTRC rcStrict;
2555 if (enmEffOpSize == IEMMODE_64BIT)
2556 {
2557 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
2558 NewRbp = NewRsp;
2559 }
2560 else if (enmEffOpSize == IEMMODE_32BIT)
2561 {
2562 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
2563 NewRbp = NewRsp;
2564 }
2565 else
2566 {
2567 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
2568 NewRbp = TmpRbp;
2569 NewRbp.Words.w0 = NewRsp.Words.w0;
2570 }
2571 if (rcStrict != VINF_SUCCESS)
2572 return rcStrict;
2573
2574 /* Copy the parameters (aka nesting levels by Intel). */
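    /* Intel describes these as nesting levels: the enclosing frames' frame
       pointers are re-read from the old frame (walking TmpRbp) and re-pushed
       onto the new frame (via NewRsp) before the new frame pointer itself is
       pushed. */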
2575 cParameters &= 0x1f;
2576 if (cParameters > 0)
2577 {
2578 switch (enmEffOpSize)
2579 {
2580 case IEMMODE_16BIT:
2581 if (pCtx->ss.Attr.n.u1DefBig)
2582 TmpRbp.DWords.dw0 -= 2;
2583 else
2584 TmpRbp.Words.w0 -= 2;
2585 do
2586 {
2587 uint16_t u16Tmp;
2588 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
2589 if (rcStrict != VINF_SUCCESS)
2590 break;
2591 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
2592 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2593 break;
2594
2595 case IEMMODE_32BIT:
2596 if (pCtx->ss.Attr.n.u1DefBig)
2597 TmpRbp.DWords.dw0 -= 4;
2598 else
2599 TmpRbp.Words.w0 -= 4;
2600 do
2601 {
2602 uint32_t u32Tmp;
2603 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
2604 if (rcStrict != VINF_SUCCESS)
2605 break;
2606 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
2607 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2608 break;
2609
2610 case IEMMODE_64BIT:
2611 TmpRbp.u -= 8;
2612 do
2613 {
2614 uint64_t u64Tmp;
2615 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
2616 if (rcStrict != VINF_SUCCESS)
2617 break;
2618 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
2619 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2620 break;
2621
2622 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2623 }
2624 if (rcStrict != VINF_SUCCESS)
2625 return rcStrict;
2626
2627 /* Push the new RBP */
2628 if (enmEffOpSize == IEMMODE_64BIT)
2629 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
2630 else if (enmEffOpSize == IEMMODE_32BIT)
2631 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
2632 else
2633 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
2634 if (rcStrict != VINF_SUCCESS)
2635 return rcStrict;
2636
2637 }
2638
2639 /* Recalc RSP. */
2640 iemRegSubFromRspEx(pIemCpu, pCtx, &NewRsp, cbFrame);
2641
2642 /** @todo Should probe write access at the new RSP according to AMD. */
2643
2644 /* Commit it. */
2645 pCtx->rbp = NewRbp.u;
2646 pCtx->rsp = NewRsp.u;
2647 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2648
2649 return VINF_SUCCESS;
2650}
2651
2652
2653
2654/**
2655 * Implements leave.
2656 *
2657 * We're doing this in C because messing with the stack registers is annoying
2658 * since they depend on SS attributes.
2659 *
2660 * @param enmEffOpSize The effective operand size.
2661 */
2662IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2663{
2664 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2665
2666 /* Calculate the intermediate RSP from RBP and the stack attributes. */
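    /* In 64-bit mode the full RBP is used; with a 32-bit (big) SS we use EBP,
       and with a 16-bit SS only the low word of RSP is replaced by BP so the
       high bits are preserved. */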
2667 RTUINT64U NewRsp;
2668 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2669 NewRsp.u = pCtx->rbp;
2670 else if (pCtx->ss.Attr.n.u1DefBig)
2671 NewRsp.u = pCtx->ebp;
2672 else
2673 {
2674 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2675 NewRsp.u = pCtx->rsp;
2676 NewRsp.Words.w0 = pCtx->bp;
2677 }
2678
2679 /* Pop RBP according to the operand size. */
2680 VBOXSTRICTRC rcStrict;
2681 RTUINT64U NewRbp;
2682 switch (enmEffOpSize)
2683 {
2684 case IEMMODE_16BIT:
2685 NewRbp.u = pCtx->rbp;
2686 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
2687 break;
2688 case IEMMODE_32BIT:
2689 NewRbp.u = 0;
2690 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
2691 break;
2692 case IEMMODE_64BIT:
2693 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
2694 break;
2695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2696 }
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699
2700
2701 /* Commit it. */
2702 pCtx->rbp = NewRbp.u;
2703 pCtx->rsp = NewRsp.u;
2704 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
2705
2706 return VINF_SUCCESS;
2707}
2708
2709
2710/**
2711 * Implements int3 and int XX.
2712 *
2713 * @param u8Int The interrupt vector number.
2714 * @param fIsBpInstr Whether this is the int3 breakpoint instruction.
2715 */
2716IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
2717{
2718 Assert(pIemCpu->cXcptRecursions == 0);
2719 return iemRaiseXcptOrInt(pIemCpu,
2720 cbInstr,
2721 u8Int,
2722 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
2723 0,
2724 0);
2725}
2726
2727
2728/**
2729 * Implements iret for real mode and V8086 mode.
2730 *
2731 * @param enmEffOpSize The effective operand size.
2732 */
2733IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2734{
2735 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2736 X86EFLAGS Efl;
2737 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2738 NOREF(cbInstr);
2739
2740 /*
2741 * In V8086 mode, iret raises #GP(0) when IOPL != 3 unless CR4.VME is set.
2742 */
2743 if ( Efl.Bits.u1VM
2744 && Efl.Bits.u2IOPL != 3
2745 && !(pCtx->cr4 & X86_CR4_VME))
2746 return iemRaiseGeneralProtectionFault0(pIemCpu);
2747
2748 /*
2749 * Do the stack bits, but don't commit RSP before everything checks
2750 * out right.
2751 */
2752 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2753 VBOXSTRICTRC rcStrict;
2754 RTCPTRUNION uFrame;
2755 uint16_t uNewCs;
2756 uint32_t uNewEip;
2757 uint32_t uNewFlags;
2758 uint64_t uNewRsp;
2759 if (enmEffOpSize == IEMMODE_32BIT)
2760 {
2761 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2762 if (rcStrict != VINF_SUCCESS)
2763 return rcStrict;
2764 uNewEip = uFrame.pu32[0];
2765 if (uNewEip > UINT16_MAX)
2766 return iemRaiseGeneralProtectionFault0(pIemCpu);
2767
2768 uNewCs = (uint16_t)uFrame.pu32[1];
2769 uNewFlags = uFrame.pu32[2];
2770 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2771 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2772 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2773 | X86_EFL_ID;
2774 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2775 }
2776 else
2777 {
2778 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2779 if (rcStrict != VINF_SUCCESS)
2780 return rcStrict;
2781 uNewEip = uFrame.pu16[0];
2782 uNewCs = uFrame.pu16[1];
2783 uNewFlags = uFrame.pu16[2];
2784 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2785 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2786 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2787 /** @todo The intel pseudo code does not indicate what happens to
2788 * reserved flags. We just ignore them. */
2789#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2790 /* Ancient CPU adjustments: See iemCImpl_popf. */
2791 if (pIemCpu->uTargetCpu == IEMTARGETCPU_286)
2792 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2793#endif
2794 }
2795 /** @todo Check how this is supposed to work if sp=0xfffe. */
2796 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2797 uNewCs, uNewEip, uNewFlags, uNewRsp));
2798
2799 /*
2800 * Check the limit of the new EIP.
2801 */
2802 /** @todo Only the AMD pseudo code checks the limit here; which is
2803 * right? */
2804 if (uNewEip > pCtx->cs.u32Limit)
2805 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2806
2807 /*
2808 * V8086 checks and flag adjustments
2809 */
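    /* With IOPL=3 the popped flags are applied, but IOPL itself is preserved
       and RF cleared. Otherwise, for a 16-bit frame and subject to the VIP/TF
       checks below, the popped IF is moved into VIF while the real IF and IOPL
       are preserved (VME). Any other combination raises #GP(0). */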
2810 if (Efl.Bits.u1VM)
2811 {
2812 if (Efl.Bits.u2IOPL == 3)
2813 {
2814 /* Preserve IOPL and clear RF. */
2815 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2816 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2817 }
2818 else if ( enmEffOpSize == IEMMODE_16BIT
2819 && ( !(uNewFlags & X86_EFL_IF)
2820 || !Efl.Bits.u1VIP )
2821 && !(uNewFlags & X86_EFL_TF) )
2822 {
2823 /* Move IF to VIF, clear RF and preserve IF and IOPL. */
2824 uNewFlags &= ~X86_EFL_VIF;
2825 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2826 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2827 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2828 }
2829 else
2830 return iemRaiseGeneralProtectionFault0(pIemCpu);
2831 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2832 }
2833
2834 /*
2835 * Commit the operation.
2836 */
2837 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2838 if (rcStrict != VINF_SUCCESS)
2839 return rcStrict;
2840#ifdef DBGFTRACE_ENABLED
2841 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2842 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2843#endif
2844
2845 pCtx->rip = uNewEip;
2846 pCtx->cs.Sel = uNewCs;
2847 pCtx->cs.ValidSel = uNewCs;
2848 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2849 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2850 /** @todo do we load attribs and limit as well? */
2851 Assert(uNewFlags & X86_EFL_1);
2852 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2853
2854 return VINF_SUCCESS;
2855}
2856
2857
2858/**
2859 * Loads a segment register when entering V8086 mode.
2860 *
2861 * @param pSReg The segment register.
2862 * @param uSeg The segment to load.
2863 */
2864static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2865{
2866 pSReg->Sel = uSeg;
2867 pSReg->ValidSel = uSeg;
2868 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2869 pSReg->u64Base = (uint32_t)uSeg << 4;
2870 pSReg->u32Limit = 0xffff;
2871 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2872 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2873 * IRET'ing to V8086. */
2874}
2875
2876
2877/**
2878 * Implements iret for protected mode returning to V8086 mode.
2879 *
2880 * @param pCtx Pointer to the CPU context.
2881 * @param uNewEip The new EIP.
2882 * @param uNewCs The new CS.
2883 * @param uNewFlags The new EFLAGS.
2884 * @param uNewRsp The RSP after the initial IRET frame.
2885 *
2886 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2887 */
2888IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2889 uint32_t, uNewFlags, uint64_t, uNewRsp)
2890{
2891 /*
2892 * Pop the V8086 specific frame bits off the stack.
2893 */
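    /* When returning to V8086 mode the IRET frame is extended with ESP, SS,
       ES, DS, FS and GS, each stored in a 32-bit slot (24 bytes in total),
       which is what gets popped here. */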
2894 VBOXSTRICTRC rcStrict;
2895 RTCPTRUNION uFrame;
2896 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2897 if (rcStrict != VINF_SUCCESS)
2898 return rcStrict;
2899 uint32_t uNewEsp = uFrame.pu32[0];
2900 uint16_t uNewSs = uFrame.pu32[1];
2901 uint16_t uNewEs = uFrame.pu32[2];
2902 uint16_t uNewDs = uFrame.pu32[3];
2903 uint16_t uNewFs = uFrame.pu32[4];
2904 uint16_t uNewGs = uFrame.pu32[5];
2905 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2906 if (rcStrict != VINF_SUCCESS)
2907 return rcStrict;
2908
2909 /*
2910 * Commit the operation.
2911 */
2912 uNewFlags &= X86_EFL_LIVE_MASK;
2913 uNewFlags |= X86_EFL_RA1_MASK;
2914#ifdef DBGFTRACE_ENABLED
2915 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2916 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2917#endif
2918
2919 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2920 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2921 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2922 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2923 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2924 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2925 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2926 pCtx->rip = uNewEip;
2927 pCtx->rsp = uNewEsp;
2928 pIemCpu->uCpl = 3;
2929
2930 return VINF_SUCCESS;
2931}
2932
2933
2934/**
2935 * Implements iret for protected mode returning via a nested task.
2936 *
2937 * @param enmEffOpSize The effective operand size.
2938 */
2939IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2940{
2941 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
2942#ifndef IEM_IMPLEMENTS_TASKSWITCH
2943 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2944#else
2945 /*
2946 * Read the segment selector in the link-field of the current TSS.
2947 */
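    /* The previous task link is the first 16-bit field of both the 286 and
       386 TSS layouts, so it can be fetched straight from the TSS base. */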
2948 RTSEL uSelRet;
2949 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2950 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
2951 if (rcStrict != VINF_SUCCESS)
2952 return rcStrict;
2953
2954 /*
2955 * Fetch the returning task's TSS descriptor from the GDT.
2956 */
2957 if (uSelRet & X86_SEL_LDT)
2958 {
2959 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
2960 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet);
2961 }
2962
2963 IEMSELDESC TssDesc;
2964 rcStrict = iemMemFetchSelDesc(pIemCpu, &TssDesc, uSelRet, X86_XCPT_GP);
2965 if (rcStrict != VINF_SUCCESS)
2966 return rcStrict;
2967
2968 if (TssDesc.Legacy.Gate.u1DescType)
2969 {
2970 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
2971 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2972 }
2973
2974 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
2975 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2976 {
2977 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
2978 return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2979 }
2980
2981 if (!TssDesc.Legacy.Gate.u1Present)
2982 {
2983 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
2984 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
2985 }
2986
2987 uint32_t uNextEip = pCtx->eip + cbInstr;
2988 return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
2989 0 /* uCr2 */, uSelRet, &TssDesc);
2990#endif
2991}
2992
2993
2994/**
2995 * Implements iret for protected mode
2996 *
2997 * @param enmEffOpSize The effective operand size.
2998 */
2999IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3000{
3001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3002 NOREF(cbInstr);
3003
3004 /*
3005 * Nested task return.
3006 */
3007 if (pCtx->eflags.Bits.u1NT)
3008 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3009
3010 /*
3011 * Normal return.
3012 *
3013 * Do the stack bits, but don't commit RSP before everything checks
3014 * out right.
3015 */
3016 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3017 VBOXSTRICTRC rcStrict;
3018 RTCPTRUNION uFrame;
3019 uint16_t uNewCs;
3020 uint32_t uNewEip;
3021 uint32_t uNewFlags;
3022 uint64_t uNewRsp;
3023 if (enmEffOpSize == IEMMODE_32BIT)
3024 {
3025 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
3026 if (rcStrict != VINF_SUCCESS)
3027 return rcStrict;
3028 uNewEip = uFrame.pu32[0];
3029 uNewCs = (uint16_t)uFrame.pu32[1];
3030 uNewFlags = uFrame.pu32[2];
3031 }
3032 else
3033 {
3034 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
3035 if (rcStrict != VINF_SUCCESS)
3036 return rcStrict;
3037 uNewEip = uFrame.pu16[0];
3038 uNewCs = uFrame.pu16[1];
3039 uNewFlags = uFrame.pu16[2];
3040 }
3041 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3042 if (rcStrict != VINF_SUCCESS)
3043 return rcStrict;
3044 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n", uNewCs, uNewEip, uNewFlags, uNewRsp));
3045
3046 /*
3047 * We're hopefully not returning to V8086 mode...
3048 */
3049 if ( (uNewFlags & X86_EFL_VM)
3050 && pIemCpu->uCpl == 0)
3051 {
3052 Assert(enmEffOpSize == IEMMODE_32BIT);
3053 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3054 }
3055
3056 /*
3057 * Protected mode.
3058 */
3059 /* Read the CS descriptor. */
3060 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3061 {
3062 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3063 return iemRaiseGeneralProtectionFault0(pIemCpu);
3064 }
3065
3066 IEMSELDESC DescCS;
3067 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3068 if (rcStrict != VINF_SUCCESS)
3069 {
3070 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3071 return rcStrict;
3072 }
3073
3074 /* Must be a code descriptor. */
3075 if (!DescCS.Legacy.Gen.u1DescType)
3076 {
3077 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3078 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3079 }
3080 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3081 {
3082 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3083 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3084 }
3085
3086#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3087 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3088 PVM pVM = IEMCPU_TO_VM(pIemCpu);
3089 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3090 {
3091 if ((uNewCs & X86_SEL_RPL) == 1)
3092 {
3093 if ( pIemCpu->uCpl == 0
3094 && ( !EMIsRawRing1Enabled(pVM)
3095 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3096 {
3097 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3098 uNewCs &= X86_SEL_MASK_OFF_RPL;
3099 }
3100# ifdef LOG_ENABLED
3101 else if (pIemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3102 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3103# endif
3104 }
3105 else if ( (uNewCs & X86_SEL_RPL) == 2
3106 && EMIsRawRing1Enabled(pVM)
3107 && pIemCpu->uCpl <= 1)
3108 {
3109 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3110 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3111 }
3112 }
3113#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3114
3115
3116 /* Privilege checks. */
3117 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3118 {
3119 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
3120 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3121 }
3122 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3123 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3124 {
3125 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3126 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3127 }
3128
3129 /* Present? */
3130 if (!DescCS.Legacy.Gen.u1Present)
3131 {
3132 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3133 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3134 }
3135
3136 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3137
3138 /*
3139 * Return to outer level?
3140 */
3141 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
3142 {
3143 uint16_t uNewSS;
3144 uint32_t uNewESP;
3145 if (enmEffOpSize == IEMMODE_32BIT)
3146 {
3147 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
3148 if (rcStrict != VINF_SUCCESS)
3149 return rcStrict;
3150/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3151 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3152 * bit of the popped SS selector it turns out. */
3153 uNewESP = uFrame.pu32[0];
3154 uNewSS = (uint16_t)uFrame.pu32[1];
3155 }
3156 else
3157 {
3158 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
3159 if (rcStrict != VINF_SUCCESS)
3160 return rcStrict;
3161 uNewESP = uFrame.pu16[0];
3162 uNewSS = uFrame.pu16[1];
3163 }
3164 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3165 if (rcStrict != VINF_SUCCESS)
3166 return rcStrict;
3167 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3168
3169 /* Read the SS descriptor. */
3170 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3171 {
3172 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3173 return iemRaiseGeneralProtectionFault0(pIemCpu);
3174 }
3175
3176 IEMSELDESC DescSS;
3177 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3178 if (rcStrict != VINF_SUCCESS)
3179 {
3180 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3181 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3182 return rcStrict;
3183 }
3184
3185 /* Privilege checks. */
3186 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3187 {
3188 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3189 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3190 }
3191 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3192 {
3193 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3194 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3195 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3196 }
3197
3198 /* Must be a writeable data segment descriptor. */
3199 if (!DescSS.Legacy.Gen.u1DescType)
3200 {
3201 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3202 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3203 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3204 }
3205 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3206 {
3207 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3208 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
3210 }
3211
3212 /* Present? */
3213 if (!DescSS.Legacy.Gen.u1Present)
3214 {
3215 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3216 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
3217 }
3218
3219 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3220
3221 /* Check EIP. */
3222 if (uNewEip > cbLimitCS)
3223 {
3224 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3225 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3226 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3227 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3228 }
3229
3230 /*
3231 * Commit the changes, marking CS and SS accessed first since
3232 * that may fail.
3233 */
3234 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3235 {
3236 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3237 if (rcStrict != VINF_SUCCESS)
3238 return rcStrict;
3239 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3240 }
3241 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3242 {
3243 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
3244 if (rcStrict != VINF_SUCCESS)
3245 return rcStrict;
3246 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3247 }
3248
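        /* Which flags the popped value may modify depends on the privilege we
           return to: everyone gets the arithmetic flags plus TF, DF and NT
           (and RF, AC and ID for non-16-bit frames); IF is only restored when
           CPL <= IOPL, and IOPL/VIF/VIP only when we are currently at CPL 0. */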
3249 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3250 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3251 if (enmEffOpSize != IEMMODE_16BIT)
3252 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3253 if (pIemCpu->uCpl == 0)
3254 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3255 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3256 fEFlagsMask |= X86_EFL_IF;
3257 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3258 fEFlagsNew &= ~fEFlagsMask;
3259 fEFlagsNew |= uNewFlags & fEFlagsMask;
3260#ifdef DBGFTRACE_ENABLED
3261 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3262 pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3263 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3264#endif
3265
3266 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3267 pCtx->rip = uNewEip;
3268 pCtx->cs.Sel = uNewCs;
3269 pCtx->cs.ValidSel = uNewCs;
3270 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3271 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3272 pCtx->cs.u32Limit = cbLimitCS;
3273 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3274 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3275 if (!pCtx->ss.Attr.n.u1DefBig)
3276 pCtx->sp = (uint16_t)uNewESP;
3277 else
3278 pCtx->rsp = uNewESP;
3279 pCtx->ss.Sel = uNewSS;
3280 pCtx->ss.ValidSel = uNewSS;
3281 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3282 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3283 pCtx->ss.u32Limit = cbLimitSs;
3284 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3285
3286 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
3287 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3288 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3289 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3290 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3291
3292 /* Done! */
3293
3294 }
3295 /*
3296 * Return to the same level.
3297 */
3298 else
3299 {
3300 /* Check EIP. */
3301 if (uNewEip > cbLimitCS)
3302 {
3303 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3304 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3305 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3306 }
3307
3308 /*
3309 * Commit the changes, marking CS first since it may fail.
3310 */
3311 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3312 {
3313 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3314 if (rcStrict != VINF_SUCCESS)
3315 return rcStrict;
3316 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3317 }
3318
3319 X86EFLAGS NewEfl;
3320 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
3321 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3322 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3323 if (enmEffOpSize != IEMMODE_16BIT)
3324 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3325 if (pIemCpu->uCpl == 0)
3326 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3327 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
3328 fEFlagsMask |= X86_EFL_IF;
3329 NewEfl.u &= ~fEFlagsMask;
3330 NewEfl.u |= fEFlagsMask & uNewFlags;
3331#ifdef DBGFTRACE_ENABLED
3332 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3333 pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,
3334 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3335#endif
3336
3337 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
3338 pCtx->rip = uNewEip;
3339 pCtx->cs.Sel = uNewCs;
3340 pCtx->cs.ValidSel = uNewCs;
3341 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3342 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3343 pCtx->cs.u32Limit = cbLimitCS;
3344 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3345 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3346 pCtx->rsp = uNewRsp;
3347 /* Done! */
3348 }
3349 return VINF_SUCCESS;
3350}
3351
3352
3353/**
3354 * Implements iret for long mode.
3355 *
3356 * @param enmEffOpSize The effective operand size.
3357 */
3358IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
3359{
3360 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3361 NOREF(cbInstr);
3362
3363 /*
3364 * Nested task return is not supported in long mode.
3365 */
3366 if (pCtx->eflags.Bits.u1NT)
3367 {
3368 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3369 return iemRaiseGeneralProtectionFault0(pIemCpu);
3370 }
3371
3372 /*
3373 * Normal return.
3374 *
3375 * Do the stack bits, but don't commit RSP before everything checks
3376 * out right.
3377 */
3378 VBOXSTRICTRC rcStrict;
3379 RTCPTRUNION uFrame;
3380 uint64_t uNewRip;
3381 uint16_t uNewCs;
3382 uint16_t uNewSs;
3383 uint32_t uNewFlags;
3384 uint64_t uNewRsp;
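 /* The long mode IRET frame always has five slots (RIP, CS, RFLAGS, RSP and SS),
    each popped at the effective operand size. */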
3385 if (enmEffOpSize == IEMMODE_64BIT)
3386 {
3387 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*8, &uFrame.pv, &uNewRsp);
3388 if (rcStrict != VINF_SUCCESS)
3389 return rcStrict;
3390 uNewRip = uFrame.pu64[0];
3391 uNewCs = (uint16_t)uFrame.pu64[1];
3392 uNewFlags = (uint32_t)uFrame.pu64[2];
3393 uNewRsp = uFrame.pu64[3];
3394 uNewSs = (uint16_t)uFrame.pu64[4];
3395 }
3396 else if (enmEffOpSize == IEMMODE_32BIT)
3397 {
3398 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*4, &uFrame.pv, &uNewRsp);
3399 if (rcStrict != VINF_SUCCESS)
3400 return rcStrict;
3401 uNewRip = uFrame.pu32[0];
3402 uNewCs = (uint16_t)uFrame.pu32[1];
3403 uNewFlags = uFrame.pu32[2];
3404 uNewRsp = uFrame.pu32[3];
3405 uNewSs = (uint16_t)uFrame.pu32[4];
3406 }
3407 else
3408 {
3409 Assert(enmEffOpSize == IEMMODE_16BIT);
3410 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 5*2, &uFrame.pv, &uNewRsp);
3411 if (rcStrict != VINF_SUCCESS)
3412 return rcStrict;
3413 uNewRip = uFrame.pu16[0];
3414 uNewCs = uFrame.pu16[1];
3415 uNewFlags = uFrame.pu16[2];
3416 uNewRsp = uFrame.pu16[3];
3417 uNewSs = uFrame.pu16[4];
3418 }
3419 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
3422 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3423
3424 /*
3425 * Check stuff.
3426 */
3427 /* Read the CS descriptor. */
3428 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3429 {
3430 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3431 return iemRaiseGeneralProtectionFault0(pIemCpu);
3432 }
3433
3434 IEMSELDESC DescCS;
3435 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs, X86_XCPT_GP);
3436 if (rcStrict != VINF_SUCCESS)
3437 {
3438 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3439 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3440 return rcStrict;
3441 }
3442
3443 /* Must be a code descriptor. */
3444 if ( !DescCS.Legacy.Gen.u1DescType
3445 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3446 {
3447 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#x -> #GP\n",
3448 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3449 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3450 }
3451
3452 /* Privilege checks. */
3453 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3454 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
3455 {
3456 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl));
3457 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3458 }
3459 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3460 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3461 {
3462 Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n",
3463 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u2Dpl));
3464 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
3465 }
3466
3467 /* Present? */
3468 if (!DescCS.Legacy.Gen.u1Present)
3469 {
3470 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3471 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
3472 }
3473
3474 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3475
3476 /* Read the SS descriptor. */
3477 IEMSELDESC DescSS;
3478 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3479 {
3480 if ( !DescCS.Legacy.Gen.u1Long
3481 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3482 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3483 {
3484 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3485 return iemRaiseGeneralProtectionFault0(pIemCpu);
3486 }
3487 DescSS.Legacy.u = 0;
3488 }
3489 else
3490 {
3491 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3492 if (rcStrict != VINF_SUCCESS)
3493 {
3494 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3495 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3496 return rcStrict;
3497 }
3498 }
3499
3500 /* Privilege checks. */
3501 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3502 {
3503 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3504 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3505 }
3506
3507 uint32_t cbLimitSs;
3508 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3509 cbLimitSs = UINT32_MAX;
3510 else
3511 {
3512 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3513 {
3514 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3515 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3516 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3517 }
3518
3519 /* Must be a writeable data segment descriptor. */
3520 if (!DescSS.Legacy.Gen.u1DescType)
3521 {
3522 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3523 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3524 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3525 }
3526 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3527 {
3528 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3529 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3530 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSs);
3531 }
3532
3533 /* Present? */
3534 if (!DescSS.Legacy.Gen.u1Present)
3535 {
3536 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3537 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSs);
3538 }
3539 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3540 }
3541
3542 /* Check EIP. */
3543 if (DescCS.Legacy.Gen.u1Long)
3544 {
3545 if (!IEM_IS_CANONICAL(uNewRip))
3546 {
3547 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3548 uNewCs, uNewRip, uNewSs, uNewRsp));
3549 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3550 }
3551 }
3552 else
3553 {
3554 if (uNewRip > cbLimitCS)
3555 {
3556 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3557 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3558 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3559 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
3560 }
3561 }
3562
3563 /*
3564 * Commit the changes, marking CS and SS accessed first since
3565 * that may fail.
3566 */
3567 /** @todo where exactly are these actually marked accessed by a real CPU? */
3568 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3569 {
3570 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
3571 if (rcStrict != VINF_SUCCESS)
3572 return rcStrict;
3573 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3574 }
3575 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3576 {
3577 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSs);
3578 if (rcStrict != VINF_SUCCESS)
3579 return rcStrict;
3580 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3581 }
3582
3583 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3584 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3585 if (enmEffOpSize != IEMMODE_16BIT)
3586 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3587 if (pIemCpu->uCpl == 0)
3588 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3589 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
3590 fEFlagsMask |= X86_EFL_IF;
3591 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
3592 fEFlagsNew &= ~fEFlagsMask;
3593 fEFlagsNew |= uNewFlags & fEFlagsMask;
3594#ifdef DBGFTRACE_ENABLED
3595 RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %x %04x:%04llx",
3596 pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3597#endif
3598
3599 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
3600 pCtx->rip = uNewRip;
3601 pCtx->cs.Sel = uNewCs;
3602 pCtx->cs.ValidSel = uNewCs;
3603 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3604 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3605 pCtx->cs.u32Limit = cbLimitCS;
3606 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3607 pIemCpu->enmCpuMode = iemCalcCpuMode(pCtx);
3608 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3609 pCtx->rsp = uNewRsp;
3610 else
3611 pCtx->sp = (uint16_t)uNewRsp;
3612 pCtx->ss.Sel = uNewSs;
3613 pCtx->ss.ValidSel = uNewSs;
3614 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3615 {
3616 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3617 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3618 pCtx->ss.u32Limit = UINT32_MAX;
3619 pCtx->ss.u64Base = 0;
3620 Log2(("iretq new SS: NULL\n"));
3621 }
3622 else
3623 {
3624 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3625 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3626 pCtx->ss.u32Limit = cbLimitSs;
3627 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3628 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3629 }
3630
3631 if (pIemCpu->uCpl != uNewCpl)
3632 {
3633 pIemCpu->uCpl = uNewCpl;
3634 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->ds);
3635 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->es);
3636 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->fs);
3637 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCpl, &pCtx->gs);
3638 }
3639
3640 return VINF_SUCCESS;
3641}
3642
3643
3644/**
3645 * Implements iret.
3646 *
3647 * @param enmEffOpSize The effective operand size.
3648 */
3649IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3650{
3651 /*
3652 * First, clear NMI blocking, if any, before causing any exceptions.
3653 */
3654 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3656
3657 /*
3658 * Call a mode specific worker.
3659 */
3660 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3661 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3662 if (IEM_IS_LONG_MODE(pIemCpu))
3663 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
3664 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3665}
3666
3667
3668/**
3669 * Implements SYSCALL (AMD and Intel64).
3672 */
3673IEM_CIMPL_DEF_0(iemCImpl_syscall)
3674{
3675 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3676
3677 /*
3678 * Check preconditions.
3679 *
3680 * Note that CPUs described in the documentation may load a few odd values
3681 * into CS and SS other than the ones we allow here. This has yet to be
3682 * checked on real hardware.
3683 */
3684 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3685 {
3686 Log(("syscall: Not enabled in EFER -> #UD\n"));
3687 return iemRaiseUndefinedOpcode(pIemCpu);
3688 }
3689 if (!(pCtx->cr0 & X86_CR0_PE))
3690 {
3691 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3692 return iemRaiseGeneralProtectionFault0(pIemCpu);
3693 }
3694 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3695 {
3696 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3697 return iemRaiseUndefinedOpcode(pIemCpu);
3698 }
3699
3700 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3701 /** @todo what about LDT selectors? Shouldn't matter, really. */
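 /* SYSCALL takes its target CS from STAR[47:32]; SS is architecturally defined
    as that selector + 8. */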
3702 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3703 uint16_t uNewSs = uNewCs + 8;
3704 if (uNewCs == 0 || uNewSs == 0)
3705 {
3706 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3707 return iemRaiseGeneralProtectionFault0(pIemCpu);
3708 }
3709
3710 /* Long mode and legacy mode differs. */
3711 if (CPUMIsGuestInLongModeEx(pCtx))
3712 {
3713 uint64_t uNewRip = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3714
3715 /* This test isn't in the docs, but I'm not trusting the guys writing
3716 the MSRs to have validated the values as canonical like they should. */
3717 if (!IEM_IS_CANONICAL(uNewRip))
3718 {
3719 Log(("syscall: New RIP (%#RX64) is not canonical -> #UD\n", uNewRip));
3720 return iemRaiseUndefinedOpcode(pIemCpu);
3721 }
3722
3723 /*
3724 * Commit it.
3725 */
3726 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3727 pCtx->rcx = pCtx->rip + cbInstr;
3728 pCtx->rip = uNewRip;
3729
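 /* R11 receives RFLAGS with RF cleared; the bits set in SFMASK are then masked
    out of RFLAGS and the reserved bit 1 is kept set. */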
3730 pCtx->rflags.u &= ~X86_EFL_RF;
3731 pCtx->r11 = pCtx->rflags.u;
3732 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3733 pCtx->rflags.u |= X86_EFL_1;
3734
3735 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3736 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3737 }
3738 else
3739 {
3740 /*
3741 * Commit it.
3742 */
3743 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3744 pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3745 pCtx->rcx = pCtx->eip + cbInstr;
3746 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3747 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3748
3749 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3750 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3751 }
3752 pCtx->cs.Sel = uNewCs;
3753 pCtx->cs.ValidSel = uNewCs;
3754 pCtx->cs.u64Base = 0;
3755 pCtx->cs.u32Limit = UINT32_MAX;
3756 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3757
3758 pCtx->ss.Sel = uNewSs;
3759 pCtx->ss.ValidSel = uNewSs;
3760 pCtx->ss.u64Base = 0;
3761 pCtx->ss.u32Limit = UINT32_MAX;
3762 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3763
3764 return VINF_SUCCESS;
3765}
3766
3767
3768/**
3769 * Implements SYSRET (AMD and Intel64).
3770 */
3771IEM_CIMPL_DEF_0(iemCImpl_sysret)
3772
3773{
3774 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3775
3776 /*
3777 * Check preconditions.
3778 *
3779 * Note that CPUs described in the documentation may load a few odd values
3780 * into CS and SS other than the ones we allow here. This has yet to be
3781 * checked on real hardware.
3782 */
3783 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3784 {
3785 Log(("sysret: Not enabled in EFER -> #UD\n"));
3786 return iemRaiseUndefinedOpcode(pIemCpu);
3787 }
3788 if (IEM_IS_GUEST_CPU_INTEL(pIemCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3789 {
3790 Log(("sysret: Only available in long mode on intel -> #UD\n"));
3791 return iemRaiseUndefinedOpcode(pIemCpu);
3792 }
3793 if (!(pCtx->cr0 & X86_CR0_PE))
3794 {
3795 Log(("sysret: Protected mode is required -> #GP(0)\n"));
3796 return iemRaiseGeneralProtectionFault0(pIemCpu);
3797 }
3798 if (pIemCpu->uCpl != 0)
3799 {
3800 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pIemCpu->uCpl));
3801 return iemRaiseGeneralProtectionFault0(pIemCpu);
3802 }
3803
3804 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
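 /* SYSRET takes its base selector from STAR[63:48]: SS is that value + 8, while
    CS is the base for 32-bit returns and the base + 16 for 64-bit returns. */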
3805 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3806 uint16_t uNewSs = uNewCs + 8;
3807 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3808 uNewCs += 16;
3809 if (uNewCs == 0 || uNewSs == 0)
3810 {
3811 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3812 return iemRaiseGeneralProtectionFault0(pIemCpu);
3813 }
3814
3815 /*
3816 * Commit it.
3817 */
3818 if (CPUMIsGuestInLongModeEx(pCtx))
3819 {
3820 if (pIemCpu->enmEffOpSize == IEMMODE_64BIT)
3821 {
3822 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
3823 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
3824 /* Note! We disregard the intel manual regarding the RCX canonical
3825 check; ask intel+xen why AMD doesn't do it. */
3826 pCtx->rip = pCtx->rcx;
3827 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3828 | (3 << X86DESCATTR_DPL_SHIFT);
3829 }
3830 else
3831 {
3832 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
3833 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
3834 pCtx->rip = pCtx->ecx;
3835 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3836 | (3 << X86DESCATTR_DPL_SHIFT);
3837 }
3838 /** @todo testcase: See what kind of flags we can make SYSRET restore and
3839 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
3840 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
3841 pCtx->rflags.u |= X86_EFL_1;
3842 }
3843 else
3844 {
3845 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
3846 pCtx->rip = pCtx->rcx;
3847 pCtx->rflags.u |= X86_EFL_IF;
3848 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
3849 | (3 << X86DESCATTR_DPL_SHIFT);
3850 }
3851 pCtx->cs.Sel = uNewCs | 3;
3852 pCtx->cs.ValidSel = uNewCs | 3;
3853 pCtx->cs.u64Base = 0;
3854 pCtx->cs.u32Limit = UINT32_MAX;
3855 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3856
3857 pCtx->ss.Sel = uNewSs | 3;
3858 pCtx->ss.ValidSel = uNewSs | 3;
3859 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3860 /* The SS hidden bits remain unchanged says AMD. To that I say "Yeah, right!". */
3861 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
3862 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
3863 * on sysret. */
3864
3865 return VINF_SUCCESS;
3866}
3867
3868
3869/**
3870 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
3871 *
3872 * @param iSegReg The segment register number (valid).
3873 * @param uSel The new selector value.
3874 */
3875IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
3876{
3877 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
3878 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
3879 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
3880
3881 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
3882
3883 /*
3884 * Real mode and V8086 mode are easy.
3885 */
3886 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
3887 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3888 {
3889 *pSel = uSel;
3890 pHid->u64Base = (uint32_t)uSel << 4;
3891 pHid->ValidSel = uSel;
3892 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3893#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
3894 /** @todo Does the CPU actually load limits and attributes in the
3895 * real/V8086 mode segment load case? It doesn't for CS in far
3896 * jumps... Affects unreal mode. */
3897 pHid->u32Limit = 0xffff;
3898 pHid->Attr.u = 0;
3899 pHid->Attr.n.u1Present = 1;
3900 pHid->Attr.n.u1DescType = 1;
3901 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
3902 ? X86_SEL_TYPE_RW
3903 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
3904#endif
3905 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3906 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3907 return VINF_SUCCESS;
3908 }
3909
3910 /*
3911 * Protected mode.
3912 *
3913 * Check if it's a null segment selector value first, that's OK for DS, ES,
3914 * FS and GS. If not null, then we have to load and parse the descriptor.
3915 */
3916 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3917 {
3918 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
3919 if (iSegReg == X86_SREG_SS)
3920 {
3921 /* In 64-bit kernel mode, the stack can be 0 because of the way
3922 interrupts are dispatched. AMD seems to have a slightly more
3923 relaxed relationship to SS.RPL than intel does. */
3924 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
3925 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
3926 || pIemCpu->uCpl > 2
3927 || ( uSel != pIemCpu->uCpl
3928 && !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
3929 {
3930 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
3931 return iemRaiseGeneralProtectionFault0(pIemCpu);
3932 }
3933 }
3934
3935 *pSel = uSel; /* Not RPL, remember :-) */
3936 iemHlpLoadNullDataSelectorProt(pIemCpu, pHid, uSel);
3937 if (iSegReg == X86_SREG_SS)
3938 pHid->Attr.u |= pIemCpu->uCpl << X86DESCATTR_DPL_SHIFT;
3939
3940 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
3941 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
3942
3943 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
3944 return VINF_SUCCESS;
3945 }
3946
3947 /* Fetch the descriptor. */
3948 IEMSELDESC Desc;
3949 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
3950 if (rcStrict != VINF_SUCCESS)
3951 return rcStrict;
3952
3953 /* Check GPs first. */
3954 if (!Desc.Legacy.Gen.u1DescType)
3955 {
3956 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
3957 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3958 }
3959 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
3960 {
3961 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3962 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3963 {
3964 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
3965 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3966 }
3967 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
3968 {
3969 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
3970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3971 }
3972 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
3973 {
3974 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
3975 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3976 }
3977 }
3978 else
3979 {
3980 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3981 {
3982 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
3983 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3984 }
3985 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3986 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3987 {
3988#if 0 /* this is what intel says. */
3989 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3990 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
3991 {
3992 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
3993 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
3994 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
3995 }
3996#else /* this is what makes more sense. */
3997 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
3998 {
3999 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4000 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4001 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4002 }
4003 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4004 {
4005 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4006 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
4007 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
4008 }
4009#endif
4010 }
4011 }
4012
4013 /* Is it there? */
4014 if (!Desc.Legacy.Gen.u1Present)
4015 {
4016 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4017 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
4018 }
4019
4020 /* The base and limit. */
4021 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4022 uint64_t u64Base;
4023 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
4024 && iSegReg < X86_SREG_FS)
4025 u64Base = 0;
4026 else
4027 u64Base = X86DESC_BASE(&Desc.Legacy);
4028
4029 /*
4030 * Ok, everything checked out fine. Now set the accessed bit before
4031 * committing the result into the registers.
4032 */
4033 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4034 {
4035 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
4036 if (rcStrict != VINF_SUCCESS)
4037 return rcStrict;
4038 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4039 }
4040
4041 /* commit */
4042 *pSel = uSel;
4043 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4044 pHid->u32Limit = cbLimit;
4045 pHid->u64Base = u64Base;
4046 pHid->ValidSel = uSel;
4047 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4048
4049 /** @todo check if the hidden bits are loaded correctly for 64-bit
4050 * mode. */
4051 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
4052
4053 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
4054 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4055 return VINF_SUCCESS;
4056}
4057
4058
4059/**
4060 * Implements 'mov SReg, r/m'.
4061 *
4062 * @param iSegReg The segment register number (valid).
4063 * @param uSel The new selector value.
4064 */
4065IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4066{
4067 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4068 if (rcStrict == VINF_SUCCESS)
4069 {
4070 if (iSegReg == X86_SREG_SS)
4071 {
4072 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4073 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4074 }
4075 }
4076 return rcStrict;
4077}
4078
4079
4080/**
4081 * Implements 'pop SReg'.
4082 *
4083 * @param iSegReg The segment register number (valid).
4084 * @param enmEffOpSize The effective operand size (valid).
4085 */
4086IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4087{
4088 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4089 VBOXSTRICTRC rcStrict;
4090
4091 /*
4092 * Read the selector off the stack and join paths with mov ss, reg.
4093 */
4094 RTUINT64U TmpRsp;
4095 TmpRsp.u = pCtx->rsp;
4096 switch (enmEffOpSize)
4097 {
4098 case IEMMODE_16BIT:
4099 {
4100 uint16_t uSel;
4101 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
4102 if (rcStrict == VINF_SUCCESS)
4103 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4104 break;
4105 }
4106
4107 case IEMMODE_32BIT:
4108 {
4109 uint32_t u32Value;
4110 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
4111 if (rcStrict == VINF_SUCCESS)
4112 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4113 break;
4114 }
4115
4116 case IEMMODE_64BIT:
4117 {
4118 uint64_t u64Value;
4119 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
4120 if (rcStrict == VINF_SUCCESS)
4121 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4122 break;
4123 }
4124 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4125 }
4126
4127 /*
4128 * Commit the stack on success.
4129 */
4130 if (rcStrict == VINF_SUCCESS)
4131 {
4132 pCtx->rsp = TmpRsp.u;
4133 if (iSegReg == X86_SREG_SS)
4134 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4135 }
4136 return rcStrict;
4137}
4138
4139
4140/**
4141 * Implements lgs, lfs, les, lds & lss.
4142 */
4143IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4144 uint16_t, uSel,
4145 uint64_t, offSeg,
4146 uint8_t, iSegReg,
4147 uint8_t, iGReg,
4148 IEMMODE, enmEffOpSize)
4149{
4150 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
4151 VBOXSTRICTRC rcStrict;
4152
4153 /*
4154 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4155 */
4156 /** @todo verify and test that mov, pop and lXs do the segment
4157 * register loading in the exact same way. */
4158 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4159 if (rcStrict == VINF_SUCCESS)
4160 {
4161 switch (enmEffOpSize)
4162 {
4163 case IEMMODE_16BIT:
4164 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4165 break;
4166 case IEMMODE_32BIT:
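 /* Writing the full 64-bit register zero-extends the 32-bit offset, matching
    how 32-bit GPR writes behave in long mode. */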
4167 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4168 break;
4169 case IEMMODE_64BIT:
4170 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
4171 break;
4172 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4173 }
4174 }
4175
4176 return rcStrict;
4177}
4178
4179
4180/**
4181 * Helper for VERR, VERW, LAR, and LSL; loads the descriptor from memory.
4182 *
4183 * @retval VINF_SUCCESS on success.
4184 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4185 * @retval iemMemFetchSysU64 return value.
4186 *
4187 * @param pIemCpu The IEM state of the calling EMT.
4188 * @param uSel The selector value.
4189 * @param fAllowSysDesc Whether system descriptors are OK or not.
4190 * @param pDesc Where to return the descriptor on success.
4191 */
4192static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4193{
4194 pDesc->Long.au64[0] = 0;
4195 pDesc->Long.au64[1] = 0;
4196
4197 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4198 return VINF_IEM_SELECTOR_NOT_OK;
4199
4200 /* Within the table limits? */
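 /* Note: OR-ing in the low three selector bits checks that the whole 8-byte
    descriptor lies below the inclusive table limit. */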
4201 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4202 RTGCPTR GCPtrBase;
4203 if (uSel & X86_SEL_LDT)
4204 {
4205 if ( !pCtx->ldtr.Attr.n.u1Present
4206 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4207 return VINF_IEM_SELECTOR_NOT_OK;
4208 GCPtrBase = pCtx->ldtr.u64Base;
4209 }
4210 else
4211 {
4212 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4213 return VINF_IEM_SELECTOR_NOT_OK;
4214 GCPtrBase = pCtx->gdtr.pGdt;
4215 }
4216
4217 /* Fetch the descriptor. */
4218 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4219 if (rcStrict != VINF_SUCCESS)
4220 return rcStrict;
4221 if (!pDesc->Legacy.Gen.u1DescType)
4222 {
4223 if (!fAllowSysDesc)
4224 return VINF_IEM_SELECTOR_NOT_OK;
4225 if (CPUMIsGuestInLongModeEx(pCtx))
4226 {
4227 rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4228 if (rcStrict != VINF_SUCCESS)
4229 return rcStrict;
4230 }
4231
4232 }
4233
4234 return VINF_SUCCESS;
4235}
4236
4237
4238/**
4239 * Implements verr (fWrite = false) and verw (fWrite = true).
4240 */
4241IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4242{
4243 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4244
4245 /** @todo figure whether the accessed bit is set or not. */
4246
4247 bool fAccessible = true;
4248 IEMSELDESC Desc;
4249 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4250 if (rcStrict == VINF_SUCCESS)
4251 {
4252 /* Check the descriptor, order doesn't matter much here. */
4253 if ( !Desc.Legacy.Gen.u1DescType
4254 || !Desc.Legacy.Gen.u1Present)
4255 fAccessible = false;
4256 else
4257 {
4258 if ( fWrite
4259 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4260 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4261 fAccessible = false;
4262
4263 /** @todo testcase for the conforming behavior. */
4264 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4265 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4266 {
4267 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4268 fAccessible = false;
4269 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4270 fAccessible = false;
4271 }
4272 }
4273
4274 }
4275 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4276 fAccessible = false;
4277 else
4278 return rcStrict;
4279
4280 /* commit */
4281 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
4282
4283 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4284 return VINF_SUCCESS;
4285}
4286
4287
4288/**
4289 * Implements LAR and LSL with 64-bit operand size.
4290 *
4291 * @returns VINF_SUCCESS.
4292 * @param pu64Dst Pointer to the destination register.
4293 * @param uSel The selector to load details for.
4294 * @param pEFlags Pointer to the eflags register.
4295 * @param fIsLar true = LAR, false = LSL.
4296 */
4297IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4298{
4299 Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
4300
4301 /** @todo figure whether the accessed bit is set or not. */
4302
4303 bool fDescOk = true;
4304 IEMSELDESC Desc;
4305 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4306 if (rcStrict == VINF_SUCCESS)
4307 {
4308 /*
4309 * Check the descriptor type.
4310 */
4311 if (!Desc.Legacy.Gen.u1DescType)
4312 {
4313 if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx)))
4314 {
4315 if (Desc.Long.Gen.u5Zeros)
4316 fDescOk = false;
4317 else
4318 switch (Desc.Long.Gen.u4Type)
4319 {
4320 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4321 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4322 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4323 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4324 break;
4325 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4326 fDescOk = fIsLar;
4327 break;
4328 default:
4329 fDescOk = false;
4330 break;
4331 }
4332 }
4333 else
4334 {
4335 switch (Desc.Long.Gen.u4Type)
4336 {
4337 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4338 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4339 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4340 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4341 case X86_SEL_TYPE_SYS_LDT:
4342 break;
4343 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4344 case X86_SEL_TYPE_SYS_TASK_GATE:
4345 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4346 fDescOk = fIsLar;
4347 break;
4348 default:
4349 fDescOk = false;
4350 break;
4351 }
4352 }
4353 }
4354 if (fDescOk)
4355 {
4356 /*
4357 * Check the RPL/DPL/CPL interaction..
4358 */
4359 /** @todo testcase for the conforming behavior. */
4360 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4361 || !Desc.Legacy.Gen.u1DescType)
4362 {
4363 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4364 fDescOk = false;
4365 else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
4366 fDescOk = false;
4367 }
4368 }
4369
4370 if (fDescOk)
4371 {
4372 /*
4373 * All fine, start committing the result.
4374 */
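 /* LAR stores the raw attribute dword masked to bits 8..23; LSL stores the
    granularity-scaled segment limit. */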
4375 if (fIsLar)
4376 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4377 else
4378 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4379 }
4380
4381 }
4382 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4383 fDescOk = false;
4384 else
4385 return rcStrict;
4386
4387 /* commit flags value and advance rip. */
4388 pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;
4389 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4390
4391 return VINF_SUCCESS;
4392}
4393
4394
4395/**
4396 * Implements LAR and LSL with 16-bit operand size.
4397 *
4398 * @returns VINF_SUCCESS.
4399 * @param pu16Dst Pointer to the destination register.
4400 * @param uSel The selector to load details for.
4401 * @param pEFlags Pointer to the eflags register.
4402 * @param fIsLar true = LAR, false = LSL.
4403 */
4404IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
4405{
4406 uint64_t u64TmpDst = *pu16Dst;
4407 IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar);
4408 *pu16Dst = (uint16_t)u64TmpDst;
4409 return VINF_SUCCESS;
4410}
4411
4412
4413/**
4414 * Implements lgdt.
4415 *
4416 * @param iEffSeg The segment of the new gdtr contents
4417 * @param GCPtrEffSrc The address of the new gdtr contents.
4418 * @param enmEffOpSize The effective operand size.
4419 */
4420IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4421{
4422 if (pIemCpu->uCpl != 0)
4423 return iemRaiseGeneralProtectionFault0(pIemCpu);
4424 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4425
4426 /*
4427 * Fetch the limit and base address.
4428 */
4429 uint16_t cbLimit;
4430 RTGCPTR GCPtrBase;
4431 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4432 if (rcStrict == VINF_SUCCESS)
4433 {
4434 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4435 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4436 else
4437 {
4438 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4439 pCtx->gdtr.cbGdt = cbLimit;
4440 pCtx->gdtr.pGdt = GCPtrBase;
4441 }
4442 if (rcStrict == VINF_SUCCESS)
4443 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4444 }
4445 return rcStrict;
4446}
4447
4448
4449/**
4450 * Implements sgdt.
4451 *
4452 * @param iEffSeg The segment where to store the gdtr content.
4453 * @param GCPtrEffDst The address where to store the gdtr content.
4454 * @param enmEffOpSize The effective operand size.
4455 */
4456IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
4457{
4458 /*
4459 * Join paths with sidt.
4460 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4461 * you really must know.
4462 */
4463 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4464 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
4465 if (rcStrict == VINF_SUCCESS)
4466 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4467 return rcStrict;
4468}
4469
4470
4471/**
4472 * Implements lidt.
4473 *
4474 * @param iEffSeg The segment of the new idtr contents
4475 * @param GCPtrEffSrc The address of the new idtr contents.
4476 * @param enmEffOpSize The effective operand size.
4477 */
4478IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4479{
4480 if (pIemCpu->uCpl != 0)
4481 return iemRaiseGeneralProtectionFault0(pIemCpu);
4482 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
4483
4484 /*
4485 * Fetch the limit and base address.
4486 */
4487 uint16_t cbLimit;
4488 RTGCPTR GCPtrBase;
4489 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4490 if (rcStrict == VINF_SUCCESS)
4491 {
4492 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4493 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
4494 else
4495 {
4496 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4497 pCtx->idtr.cbIdt = cbLimit;
4498 pCtx->idtr.pIdt = GCPtrBase;
4499 }
4500 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4501 }
4502 return rcStrict;
4503}
4504
4505
4506/**
4507 * Implements sidt.
4508 *
4509 * @param iEffSeg The segment where to store the idtr content.
4510 * @param GCPtrEffDst The address where to store the idtr content.
4511 * @param enmEffOpSize The effective operand size.
4512 */
4513IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
4514{
4515 /*
4516 * Join paths with sgdt.
4517 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4518 * you really must know.
4519 */
4520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4521 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
4522 if (rcStrict == VINF_SUCCESS)
4523 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4524 return rcStrict;
4525}
4526
4527
4528/**
4529 * Implements lldt.
4530 *
4531 * @param uNewLdt The new LDT selector value.
4532 */
4533IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4534{
4535 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4536
4537 /*
4538 * Check preconditions.
4539 */
4540 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4541 {
4542 Log(("lldt %04x - real or v8086 mode -> #UD\n", uNewLdt));
4543 return iemRaiseUndefinedOpcode(pIemCpu);
4544 }
4545 if (pIemCpu->uCpl != 0)
4546 {
4547 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
4548 return iemRaiseGeneralProtectionFault0(pIemCpu);
4549 }
4550 if (uNewLdt & X86_SEL_LDT)
4551 {
4552 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4553 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
4554 }
4555
4556 /*
4557 * Now, loading a NULL selector is easy.
4558 */
4559 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4560 {
4561 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4562 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4563 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
4564 else
4565 pCtx->ldtr.Sel = uNewLdt;
4566 pCtx->ldtr.ValidSel = uNewLdt;
4567 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4568 if (IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4569 {
4570 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4571 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4572 }
4573 else if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
4574 {
4575 /* AMD-V seems to leave the base and limit alone. */
4576 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4577 }
4578 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
4579 {
4580 /* VT-x (Intel 3960x) seems to be doing the following. */
4581 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4582 pCtx->ldtr.u64Base = 0;
4583 pCtx->ldtr.u32Limit = UINT32_MAX;
4584 }
4585
4586 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4587 return VINF_SUCCESS;
4588 }
4589
4590 /*
4591 * Read the descriptor.
4592 */
4593 IEMSELDESC Desc;
4594 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4595 if (rcStrict != VINF_SUCCESS)
4596 return rcStrict;
4597
4598 /* Check GPs first. */
4599 if (Desc.Legacy.Gen.u1DescType)
4600 {
4601 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4602 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4603 }
4604 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4605 {
4606 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4607 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4608 }
4609 uint64_t u64Base;
4610 if (!IEM_IS_LONG_MODE(pIemCpu))
4611 u64Base = X86DESC_BASE(&Desc.Legacy);
4612 else
4613 {
4614 if (Desc.Long.Gen.u5Zeros)
4615 {
4616 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4617 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4618 }
4619
4620 u64Base = X86DESC64_BASE(&Desc.Long);
4621 if (!IEM_IS_CANONICAL(u64Base))
4622 {
4623 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4624 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4625 }
4626 }
4627
4628 /* NP */
4629 if (!Desc.Legacy.Gen.u1Present)
4630 {
4631 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4632 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
4633 }
4634
4635 /*
4636 * It checks out alright, update the registers.
4637 */
4638/** @todo check if the actual value is loaded or if the RPL is dropped */
4639 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4640 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
4641 else
4642 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4643 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4644 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4645 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4646 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4647 pCtx->ldtr.u64Base = u64Base;
4648
4649 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4650 return VINF_SUCCESS;
4651}
4652
4653
4654/**
4655 * Implements ltr.
4656 *
4657 * @param uNewTr The new task register (TSS) selector value.
4658 */
4659IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4660{
4661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4662
4663 /*
4664 * Check preconditions.
4665 */
4666 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4667 {
4668 Log(("ltr %04x - real or v8086 mode -> #UD\n", uNewTr));
4669 return iemRaiseUndefinedOpcode(pIemCpu);
4670 }
4671 if (pIemCpu->uCpl != 0)
4672 {
4673 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
4674 return iemRaiseGeneralProtectionFault0(pIemCpu);
4675 }
4676 if (uNewTr & X86_SEL_LDT)
4677 {
4678 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4679 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
4680 }
4681 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4682 {
4683 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4684 return iemRaiseGeneralProtectionFault0(pIemCpu);
4685 }
4686
4687 /*
4688 * Read the descriptor.
4689 */
4690 IEMSELDESC Desc;
4691 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4692 if (rcStrict != VINF_SUCCESS)
4693 return rcStrict;
4694
4695 /* Check GPs first. */
4696 if (Desc.Legacy.Gen.u1DescType)
4697 {
4698 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4699 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4700 }
4701 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4702 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4703 || IEM_IS_LONG_MODE(pIemCpu)) )
4704 {
4705 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4706 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4707 }
4708 uint64_t u64Base;
4709 if (!IEM_IS_LONG_MODE(pIemCpu))
4710 u64Base = X86DESC_BASE(&Desc.Legacy);
4711 else
4712 {
4713 if (Desc.Long.Gen.u5Zeros)
4714 {
4715 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4716 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4717 }
4718
4719 u64Base = X86DESC64_BASE(&Desc.Long);
4720 if (!IEM_IS_CANONICAL(u64Base))
4721 {
4722 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
4723 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4724 }
4725 }
4726
4727 /* NP */
4728 if (!Desc.Legacy.Gen.u1Present)
4729 {
4730 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
4731 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
4732 }
4733
4734 /*
4735 * Set it busy.
4736 * Note! Intel says this should lock down the whole descriptor, but we'll
4737 * restrict ourselves to 32-bit for now due to lack of inline
4738 * assembly and such.
4739 */
4740 void *pvDesc;
4741 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
4742 if (rcStrict != VINF_SUCCESS)
4743 return rcStrict;
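 /* The busy bit is bit 1 of the type field in descriptor byte 5 (bit 41 of the
    descriptor). ASMAtomicBitSet needs a 32-bit aligned base, so pick the base
    pointer and bit number according to the mapping's alignment. */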
4744 switch ((uintptr_t)pvDesc & 3)
4745 {
4746 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
4747 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
4748 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
4749 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
4750 }
4751 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
4752 if (rcStrict != VINF_SUCCESS)
4753 return rcStrict;
4754 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4755
4756 /*
4757 * It checks out alright, update the registers.
4758 */
4759/** @todo check if the actual value is loaded or if the RPL is dropped */
4760 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4761 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
4762 else
4763 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
4764 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
4765 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4766 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4767 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4768 pCtx->tr.u64Base = u64Base;
4769
4770 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4771 return VINF_SUCCESS;
4772}
4773
4774
4775/**
4776 * Implements mov GReg,CRx.
4777 *
4778 * @param iGReg The general register to store the CRx value in.
4779 * @param iCrReg The CRx register to read (valid).
4780 */
4781IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
4782{
4783 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4784 if (pIemCpu->uCpl != 0)
4785 return iemRaiseGeneralProtectionFault0(pIemCpu);
4786 Assert(!pCtx->eflags.Bits.u1VM);
4787
4788 /* read it */
4789 uint64_t crX;
4790 switch (iCrReg)
4791 {
4792 case 0: crX = pCtx->cr0; break;
4793 case 2: crX = pCtx->cr2; break;
4794 case 3: crX = pCtx->cr3; break;
4795 case 4: crX = pCtx->cr4; break;
4796 case 8:
4797 {
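 /* CR8 exposes the upper four bits of the APIC TPR (the priority class). */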
4798 uint8_t uTpr;
4799 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
4800 if (RT_SUCCESS(rc))
4801 crX = uTpr >> 4;
4802 else
4803 crX = 0;
4804 break;
4805 }
4806 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
4807 }
4808
4809 /* store it */
4810 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4811 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
4812 else
4813 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
4814
4815 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
4816 return VINF_SUCCESS;
4817}
4818
4819
4820/**
4821 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
4822 *
4823 * @param iCrReg The CRx register to write (valid).
4824 * @param uNewCrX The new value.
4825 */
4826IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
4827{
4828 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4829 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4830 VBOXSTRICTRC rcStrict;
4831 int rc;
4832
4833 /*
4834 * Try store it.
4835 * Unfortunately, CPUM only does a tiny bit of the work.
4836 */
4837 switch (iCrReg)
4838 {
4839 case 0:
4840 {
4841 /*
4842 * Perform checks.
4843 */
4844 uint64_t const uOldCrX = pCtx->cr0;
4845 uNewCrX |= X86_CR0_ET; /* hardcoded */
4846
4847 /* Check for reserved bits. */
4848 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
4849 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
4850 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
4851 if (uNewCrX & ~(uint64_t)fValid)
4852 {
4853 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
4854 return iemRaiseGeneralProtectionFault0(pIemCpu);
4855 }
4856
4857 /* Check for invalid combinations. */
4858 if ( (uNewCrX & X86_CR0_PG)
4859 && !(uNewCrX & X86_CR0_PE) )
4860 {
4861 Log(("Trying to set CR0.PG without CR0.PE\n"));
4862 return iemRaiseGeneralProtectionFault0(pIemCpu);
4863 }
4864
4865 if ( !(uNewCrX & X86_CR0_CD)
4866 && (uNewCrX & X86_CR0_NW) )
4867 {
4868 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
4869 return iemRaiseGeneralProtectionFault0(pIemCpu);
4870 }
4871
4872 /* Long mode consistency checks. */
4873 if ( (uNewCrX & X86_CR0_PG)
4874 && !(uOldCrX & X86_CR0_PG)
4875 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4876 {
4877 if (!(pCtx->cr4 & X86_CR4_PAE))
4878 {
4879 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
4880 return iemRaiseGeneralProtectionFault0(pIemCpu);
4881 }
4882 if (pCtx->cs.Attr.n.u1Long)
4883 {
4884 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
4885 return iemRaiseGeneralProtectionFault0(pIemCpu);
4886 }
4887 }
4888
4889 /** @todo check reserved PDPTR bits as AMD states. */
4890
4891 /*
4892 * Change CR0.
4893 */
4894 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
4895 CPUMSetGuestCR0(pVCpu, uNewCrX);
4896 else
4897 pCtx->cr0 = uNewCrX;
4898 Assert(pCtx->cr0 == uNewCrX);
4899
4900 /*
4901 * Change EFER.LMA if entering or leaving long mode.
4902 */
4903 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
4904 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
4905 {
4906 uint64_t NewEFER = pCtx->msrEFER;
4907 if (uNewCrX & X86_CR0_PG)
4908 NewEFER |= MSR_K6_EFER_LMA;
4909 else
4910 NewEFER &= ~MSR_K6_EFER_LMA;
4911
4912 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4913 CPUMSetGuestEFER(pVCpu, NewEFER);
4914 else
4915 pCtx->msrEFER = NewEFER;
4916 Assert(pCtx->msrEFER == NewEFER);
4917 }
4918
4919 /*
4920 * Inform PGM.
4921 */
4922 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4923 {
4924 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
4925 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
4926 {
4927 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
4928 AssertRCReturn(rc, rc);
4929 /* ignore informational status codes */
4930 }
4931 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
4932 }
4933 else
4934 rcStrict = VINF_SUCCESS;
4935
4936#ifdef IN_RC
4937 /* Return to ring-3 for rescheduling if WP or AM changes. */
4938 if ( rcStrict == VINF_SUCCESS
4939 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
4940 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
4941 rcStrict = VINF_EM_RESCHEDULE;
4942#endif
4943 break;
4944 }
4945
4946 /*
4947 * CR2 can be changed without any restrictions.
4948 */
4949 case 2:
4950 pCtx->cr2 = uNewCrX;
4951 rcStrict = VINF_SUCCESS;
4952 break;
4953
4954 /*
4955 * CR3 is relatively simple, although AMD and Intel have different
4956 * accounts of how setting reserved bits is handled. We take intel's
4957 * word for the lower bits and AMD's for the high bits (63:52). The
4958 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
4959 * on this.
4960 */
4961 /** @todo Testcase: Setting reserved bits in CR3, especially before
4962 * enabling paging. */
4963 case 3:
4964 {
4965 /* check / mask the value. */
4966 if (uNewCrX & UINT64_C(0xfff0000000000000))
4967 {
4968 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
4969 return iemRaiseGeneralProtectionFault0(pIemCpu);
4970 }
4971
4972 uint64_t fValid;
4973 if ( (pCtx->cr4 & X86_CR4_PAE)
4974 && (pCtx->msrEFER & MSR_K6_EFER_LME))
4975 fValid = UINT64_C(0x000fffffffffffff);
4976 else
4977 fValid = UINT64_C(0xffffffff);
4978 if (uNewCrX & ~fValid)
4979 {
4980 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
4981 uNewCrX, uNewCrX & ~fValid));
4982 uNewCrX &= fValid;
4983 }
4984
4985 /** @todo If we're in PAE mode we should check the PDPTRs for
4986 * invalid bits. */
4987
4988 /* Make the change. */
4989 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4990 {
4991 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
4992 AssertRCSuccessReturn(rc, rc);
4993 }
4994 else
4995 pCtx->cr3 = uNewCrX;
4996
4997 /* Inform PGM. */
4998 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
4999 {
5000 if (pCtx->cr0 & X86_CR0_PG)
5001 {
5002 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5003 AssertRCReturn(rc, rc);
5004 /* ignore informational status codes */
5005 }
5006 }
5007 rcStrict = VINF_SUCCESS;
5008 break;
5009 }
5010
5011 /*
5012 * CR4 is a bit more tedious as there are bits which cannot be cleared
5013 * under some circumstances and such.
5014 */
5015 case 4:
5016 {
5017 uint64_t const uOldCrX = pCtx->cr4;
5018
5019 /** @todo Shouldn't this look at the guest CPUID bits to determine
5020 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5021 * should #GP(0). */
5022 /* reserved bits */
5023 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5024 | X86_CR4_TSD | X86_CR4_DE
5025 | X86_CR4_PSE | X86_CR4_PAE
5026 | X86_CR4_MCE | X86_CR4_PGE
5027 | X86_CR4_PCE | X86_CR4_OSFXSR
5028 | X86_CR4_OSXMMEEXCPT;
5029 //if (xxx)
5030 // fValid |= X86_CR4_VMXE;
5031 if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
5032 fValid |= X86_CR4_OSXSAVE;
5033 if (uNewCrX & ~(uint64_t)fValid)
5034 {
5035 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5036 return iemRaiseGeneralProtectionFault0(pIemCpu);
5037 }
5038
5039 /* long mode checks. */
5040 if ( (uOldCrX & X86_CR4_PAE)
5041 && !(uNewCrX & X86_CR4_PAE)
5042 && CPUMIsGuestInLongModeEx(pCtx) )
5043 {
5044 Log(("Trying to clear CR4.PAE while long mode is active\n"));
5045 return iemRaiseGeneralProtectionFault0(pIemCpu);
5046 }
5047
5048
5049 /*
5050 * Change it.
5051 */
5052 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5053 {
5054 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5055 AssertRCSuccessReturn(rc, rc);
5056 }
5057 else
5058 pCtx->cr4 = uNewCrX;
5059 Assert(pCtx->cr4 == uNewCrX);
5060
5061 /*
5062 * Notify SELM and PGM.
5063 */
5064 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5065 {
5066 /* SELM - VME may change things wrt to the TSS shadowing. */
5067 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5068 {
5069 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5070 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5071#ifdef VBOX_WITH_RAW_MODE
5072 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
5073 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5074#endif
5075 }
5076
5077 /* PGM - flushing and mode. */
5078 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5079 {
5080 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5081 AssertRCReturn(rc, rc);
5082 /* ignore informational status codes */
5083 }
5084 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5085 }
5086 else
5087 rcStrict = VINF_SUCCESS;
5088 break;
5089 }
5090
5091 /*
5092 * CR8 maps to the APIC TPR.
5093 */
5094 case 8:
5095 if (uNewCrX & ~(uint64_t)0xf)
5096 {
5097 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5098 return iemRaiseGeneralProtectionFault0(pIemCpu);
5099 }
5100
5101 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
5102 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
5103 rcStrict = VINF_SUCCESS;
5104 break;
5105
5106 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5107 }
5108
5109 /*
5110 * Advance the RIP on success.
5111 */
5112 if (RT_SUCCESS(rcStrict))
5113 {
5114 if (rcStrict != VINF_SUCCESS)
5115 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5116 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5117 }
5118
5119 return rcStrict;
5120}
5121
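/*
 * Editor's illustrative sketch (not part of IEM): the CR8 <-> TPR mapping used
 * in the CR8 case above.  CR8 holds the priority class, i.e. bits 7:4 of the
 * 8-bit APIC TPR; the helper names are made up for the example.
 *
 *      static uint8_t exampleTprFromCr8(uint64_t uCr8)
 *      {
 *          return (uint8_t)((uCr8 & 0xf) << 4);    // class -> TPR[7:4]
 *      }
 *
 *      static uint64_t exampleCr8FromTpr(uint8_t uTpr)
 *      {
 *          return uTpr >> 4;                       // TPR[7:4] -> class
 *      }
 *
 * So writing 5 to CR8 sets the TPR to 0x50, and a TPR of 0x50 would read back
 * as CR8 = 5; the TPR sub-class bits (TPR[3:0]) are not representable via CR8.
 */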
5122
5123/**
5124 * Implements mov CRx,GReg.
5125 *
5126 * @param iCrReg The CRx register to write (valid).
5127 * @param iGReg The general register to load the CRx value from.
5128 */
5129IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5130{
5131 if (pIemCpu->uCpl != 0)
5132 return iemRaiseGeneralProtectionFault0(pIemCpu);
5133 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5134
5135 /*
5136 * Read the new value from the source register and call common worker.
5137 */
5138 uint64_t uNewCrX;
5139 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5140 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
5141 else
5142 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
5143 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
5144}
5145
5146
5147/**
5148 * Implements 'LMSW r/m16'
5149 *
5150 * @param u16NewMsw The new value.
5151 */
5152IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5153{
5154 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5155
5156 if (pIemCpu->uCpl != 0)
5157 return iemRaiseGeneralProtectionFault0(pIemCpu);
5158 Assert(!pCtx->eflags.Bits.u1VM);
5159
5160 /*
5161 * Compose the new CR0 value and call common worker.
5162 */
5163 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5164 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5165 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5166}
5167
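/*
 * Editor's illustrative sketch (not part of IEM): the CR0 merge that LMSW
 * performs above.  Only the low four MSW bits (PE, MP, EM, TS) are writable,
 * and PE can be set but never cleared this way.  The function name is made up;
 * the bit values are the architectural CR0 bit positions 0..3.
 *
 *      static uint64_t exampleLmswMerge(uint64_t uCr0, uint16_t uMsw)
 *      {
 *          const uint64_t fPe = 0x1, fMp = 0x2, fEm = 0x4, fTs = 0x8;
 *          uint64_t uNew = uCr0 & ~(fMp | fEm | fTs);   // note: PE is kept as-is
 *          uNew |= uMsw & (fPe | fMp | fEm | fTs);
 *          return uNew;
 *      }
 *
 * With uCr0 = 0x11 (PE set) and uMsw = 0, the result is still 0x11: since PE
 * is not in the cleared mask, LMSW cannot be used to leave protected mode.
 */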
5168
5169/**
5170 * Implements 'CLTS'.
5171 */
5172IEM_CIMPL_DEF_0(iemCImpl_clts)
5173{
5174 if (pIemCpu->uCpl != 0)
5175 return iemRaiseGeneralProtectionFault0(pIemCpu);
5176
5177 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5178 uint64_t uNewCr0 = pCtx->cr0;
5179 uNewCr0 &= ~X86_CR0_TS;
5180 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
5181}
5182
5183
5184/**
5185 * Implements mov GReg,DRx.
5186 *
5187 * @param iGReg The general register to store the DRx value in.
5188 * @param iDrReg The DRx register to read (0-7).
5189 */
5190IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5191{
5192 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5193
5194 /*
5195 * Check preconditions.
5196 */
5197
5198 /* Raise GPs. */
5199 if (pIemCpu->uCpl != 0)
5200 return iemRaiseGeneralProtectionFault0(pIemCpu);
5201 Assert(!pCtx->eflags.Bits.u1VM);
5202
5203 if ( (iDrReg == 4 || iDrReg == 5)
5204 && (pCtx->cr4 & X86_CR4_DE) )
5205 {
5206 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5207 return iemRaiseGeneralProtectionFault0(pIemCpu);
5208 }
5209
5210 /* Raise #DB if general access detect is enabled. */
5211 if (pCtx->dr[7] & X86_DR7_GD)
5212 {
5213 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5214 return iemRaiseDebugException(pIemCpu);
5215 }
5216
5217 /*
5218 * Read the debug register and store it in the specified general register.
5219 */
5220 uint64_t drX;
5221 switch (iDrReg)
5222 {
5223 case 0: drX = pCtx->dr[0]; break;
5224 case 1: drX = pCtx->dr[1]; break;
5225 case 2: drX = pCtx->dr[2]; break;
5226 case 3: drX = pCtx->dr[3]; break;
5227 case 6:
5228 case 4:
5229 drX = pCtx->dr[6];
5230 drX |= X86_DR6_RA1_MASK;
5231 drX &= ~X86_DR6_RAZ_MASK;
5232 break;
5233 case 7:
5234 case 5:
5235 drX = pCtx->dr[7];
5236 drX |= X86_DR7_RA1_MASK;
5237 drX &= ~X86_DR7_RAZ_MASK;
5238 break;
5239 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5240 }
5241
5242 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5243 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
5244 else
5245 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
5246
5247 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5248 return VINF_SUCCESS;
5249}
5250
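/*
 * Editor's illustrative sketch (not part of IEM): the read-as-one / read-as-
 * zero fix-up applied to DR6/DR7 above.  The function name is made up and the
 * masks are passed in symbolically (the real values come from
 * X86_DR6_RA1_MASK and friends).
 *
 *      // fRa1Mask: bits that always read as one; fRazMask: bits that always read as zero.
 *      static uint64_t exampleApplyFixedDrBits(uint64_t uDrX, uint64_t fRa1Mask, uint64_t fRazMask)
 *      {
 *          uDrX |= fRa1Mask;
 *          uDrX &= ~fRazMask;
 *          return uDrX;
 *      }
 *
 * The same adjustment is applied both when the guest reads DR6/DR7 (above) and
 * when it writes them (see iemCImpl_mov_Dd_Rd below), so the architecturally
 * fixed bits always hold their defined values.
 */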
5251
5252/**
5253 * Implements mov DRx,GReg.
5254 *
5255 * @param iDrReg The DRx register to write (valid).
5256 * @param iGReg The general register to load the DRx value from.
5257 */
5258IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5259{
5260 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5261
5262 /*
5263 * Check preconditions.
5264 */
5265 if (pIemCpu->uCpl != 0)
5266 return iemRaiseGeneralProtectionFault0(pIemCpu);
5267 Assert(!pCtx->eflags.Bits.u1VM);
5268
5269 if (iDrReg == 4 || iDrReg == 5)
5270 {
5271 if (pCtx->cr4 & X86_CR4_DE)
5272 {
5273 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5274 return iemRaiseGeneralProtectionFault0(pIemCpu);
5275 }
5276 iDrReg += 2;
5277 }
5278
5279 /* Raise #DB if general access detect is enabled. */
5280 /** @todo Is the \#DB for DR7.GD raised before the \#GP for setting
5281 * reserved high bits in DR7/DR6? */
5282 if (pCtx->dr[7] & X86_DR7_GD)
5283 {
5284 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5285 return iemRaiseDebugException(pIemCpu);
5286 }
5287
5288 /*
5289 * Read the new value from the source register.
5290 */
5291 uint64_t uNewDrX;
5292 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
5293 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
5294 else
5295 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
5296
5297 /*
5298 * Adjust it.
5299 */
5300 switch (iDrReg)
5301 {
5302 case 0:
5303 case 1:
5304 case 2:
5305 case 3:
5306 /* nothing to adjust */
5307 break;
5308
5309 case 6:
5310 if (uNewDrX & X86_DR6_MBZ_MASK)
5311 {
5312 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5313 return iemRaiseGeneralProtectionFault0(pIemCpu);
5314 }
5315 uNewDrX |= X86_DR6_RA1_MASK;
5316 uNewDrX &= ~X86_DR6_RAZ_MASK;
5317 break;
5318
5319 case 7:
5320 if (uNewDrX & X86_DR7_MBZ_MASK)
5321 {
5322 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5323 return iemRaiseGeneralProtectionFault0(pIemCpu);
5324 }
5325 uNewDrX |= X86_DR7_RA1_MASK;
5326 uNewDrX &= ~X86_DR7_RAZ_MASK;
5327 break;
5328
5329 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5330 }
5331
5332 /*
5333 * Do the actual setting.
5334 */
5335 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5336 {
5337 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
5338 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5339 }
5340 else
5341 pCtx->dr[iDrReg] = uNewDrX;
5342
5343 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5344 return VINF_SUCCESS;
5345}
5346
5347
5348/**
5349 * Implements 'INVLPG m'.
5350 *
5351 * @param GCPtrPage The effective address of the page to invalidate.
5352 * @remarks Updates the RIP.
5353 */
5354IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5355{
5356 /* ring-0 only. */
5357 if (pIemCpu->uCpl != 0)
5358 return iemRaiseGeneralProtectionFault0(pIemCpu);
5359 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
5360
5361 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
5362 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5363
5364 if (rc == VINF_SUCCESS)
5365 return VINF_SUCCESS;
5366 if (rc == VINF_PGM_SYNC_CR3)
5367 return iemSetPassUpStatus(pIemCpu, rc);
5368
5369 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5370 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5371 return rc;
5372}
5373
5374
5375/**
5376 * Implements RDTSC.
5377 */
5378IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5379{
5380 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5381
5382 /*
5383 * Check preconditions.
5384 */
5385 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fTsc)
5386 return iemRaiseUndefinedOpcode(pIemCpu);
5387
5388 if ( (pCtx->cr4 & X86_CR4_TSD)
5389 && pIemCpu->uCpl != 0)
5390 {
5391 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
5392 return iemRaiseGeneralProtectionFault0(pIemCpu);
5393 }
5394
5395 /*
5396 * Do the job.
5397 */
5398 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
5399 pCtx->rax = (uint32_t)uTicks;
5400 pCtx->rdx = uTicks >> 32;
5401#ifdef IEM_VERIFICATION_MODE_FULL
5402 pIemCpu->fIgnoreRaxRdx = true;
5403#endif
5404
5405 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5406 return VINF_SUCCESS;
5407}
5408
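/*
 * Editor's illustrative sketch (not part of IEM): the EDX:EAX split used by
 * RDTSC above (RDMSR uses the same pairing).  The helper name is made up.
 *
 *      static void exampleSplitToEdxEax(uint64_t uValue, uint32_t *puEax, uint32_t *puEdx)
 *      {
 *          *puEax = (uint32_t)uValue;          // low 32 bits -> EAX
 *          *puEdx = (uint32_t)(uValue >> 32);  // high 32 bits -> EDX
 *      }
 *
 * The code above assigns the 32-bit halves directly to rax/rdx, which also
 * emulates the architectural zero-extension of 32-bit register writes in
 * 64-bit mode.
 */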
5409
5410/**
5411 * Implements RDMSR.
5412 */
5413IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5414{
5415 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5416
5417 /*
5418 * Check preconditions.
5419 */
5420 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5421 return iemRaiseUndefinedOpcode(pIemCpu);
5422 if (pIemCpu->uCpl != 0)
5423 return iemRaiseGeneralProtectionFault0(pIemCpu);
5424
5425 /*
5426 * Do the job.
5427 */
5428 RTUINT64U uValue;
5429 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
5430 if (rcStrict == VINF_SUCCESS)
5431 {
5432 pCtx->rax = uValue.s.Lo;
5433 pCtx->rdx = uValue.s.Hi;
5434
5435 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5436 return VINF_SUCCESS;
5437 }
5438
5439#ifndef IN_RING3
5440 /* Deferred to ring-3. */
5441 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5442 {
5443 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5444 return rcStrict;
5445 }
5446#else /* IN_RING3 */
5447 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5448 static uint32_t s_cTimes = 0;
5449 if (s_cTimes++ < 10)
5450 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5451 else
5452#endif
5453 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5454 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5455 return iemRaiseGeneralProtectionFault0(pIemCpu);
5456}
5457
5458
5459/**
5460 * Implements WRMSR.
5461 */
5462IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5463{
5464 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5465
5466 /*
5467 * Check preconditions.
5468 */
5469 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMsr)
5470 return iemRaiseUndefinedOpcode(pIemCpu);
5471 if (pIemCpu->uCpl != 0)
5472 return iemRaiseGeneralProtectionFault0(pIemCpu);
5473
5474 /*
5475 * Do the job.
5476 */
5477 RTUINT64U uValue;
5478 uValue.s.Lo = pCtx->eax;
5479 uValue.s.Hi = pCtx->edx;
5480
5481 VBOXSTRICTRC rcStrict;
5482 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5483 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5484 else
5485 {
5486#ifdef IN_RING3
5487 CPUMCTX CtxTmp = *pCtx;
5488 rcStrict = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
5489 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
5490 *pCtx = *pCtx2;
5491 *pCtx2 = CtxTmp;
5492#else
5493 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5494#endif
5495 }
5496 if (rcStrict == VINF_SUCCESS)
5497 {
5498 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5499 return VINF_SUCCESS;
5500 }
5501
5502#ifndef IN_RING3
5503 /* Deferred to ring-3. */
5504 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
5505 {
5506 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
5507 return rcStrict;
5508 }
5509#else /* IN_RING3 */
5510 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5511 static uint32_t s_cTimes = 0;
5512 if (s_cTimes++ < 10)
5513 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5514 else
5515#endif
5516 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5517 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5518 return iemRaiseGeneralProtectionFault0(pIemCpu);
5519}
5520
5521
5522/**
5523 * Implements 'IN eAX, port'.
5524 *
5525 * @param u16Port The source port.
5526 * @param cbReg The register size.
5527 */
5528IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
5529{
5530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5531
5532 /*
5533 * CPL check
5534 */
5535 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5536 if (rcStrict != VINF_SUCCESS)
5537 return rcStrict;
5538
5539 /*
5540 * Perform the I/O.
5541 */
5542 uint32_t u32Value;
5543 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5544 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
5545 else
5546 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
5547 if (IOM_SUCCESS(rcStrict))
5548 {
5549 switch (cbReg)
5550 {
5551 case 1: pCtx->al = (uint8_t)u32Value; break;
5552 case 2: pCtx->ax = (uint16_t)u32Value; break;
5553 case 4: pCtx->rax = u32Value; break;
5554 default: AssertFailedReturn(VERR_IEM_IPE_3);
5555 }
5556 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5557 pIemCpu->cPotentialExits++;
5558 if (rcStrict != VINF_SUCCESS)
5559 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5560 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5561
5562 /*
5563 * Check for I/O breakpoints.
5564 */
5565 uint32_t const uDr7 = pCtx->dr[7];
5566 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5567 && X86_DR7_ANY_RW_IO(uDr7)
5568 && (pCtx->cr4 & X86_CR4_DE))
5569 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5570 {
5571 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5572 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5573 rcStrict = iemRaiseDebugException(pIemCpu);
5574 }
5575 }
5576
5577 return rcStrict;
5578}
5579
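/*
 * Editor's illustrative sketch (not part of IEM): the guest-side condition for
 * the I/O breakpoint check in the IN/OUT paths above.  Architectural I/O
 * breakpoints (DR7 R/W field = 10b) only fire when CR4.DE is set.  Names are
 * made up and the DR7 decoding is reduced to the relevant fields; the real
 * code additionally asks DBGF whether a host-side hardware I/O breakpoint is
 * armed.
 *
 *      static bool exampleGuestIoBpArmed(uint64_t uDr7, bool fCr4De)
 *      {
 *          if (!fCr4De)
 *              return false;                                   // I/O breakpoints need CR4.DE=1
 *          for (unsigned iBp = 0; iBp < 4; iBp++)
 *          {
 *              bool const     fEnabled = ((uDr7 >> (iBp * 2)) & 3) != 0;   // L or G bit
 *              unsigned const fRW      = (uDr7 >> (16 + iBp * 4)) & 3;     // R/W field
 *              if (fEnabled && fRW == 2)                       // 10b = break on I/O read/write
 *                  return true;
 *          }
 *          return false;
 *      }
 */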
5580
5581/**
5582 * Implements 'IN eAX, DX'.
5583 *
5584 * @param cbReg The register size.
5585 */
5586IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
5587{
5588 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5589}
5590
5591
5592/**
5593 * Implements 'OUT port, eAX'.
5594 *
5595 * @param u16Port The destination port.
5596 * @param cbReg The register size.
5597 */
5598IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
5599{
5600 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5601
5602 /*
5603 * CPL check
5604 */
5605 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
5606 if (rcStrict != VINF_SUCCESS)
5607 return rcStrict;
5608
5609 /*
5610 * Perform the I/O.
5611 */
5612 uint32_t u32Value;
5613 switch (cbReg)
5614 {
5615 case 1: u32Value = pCtx->al; break;
5616 case 2: u32Value = pCtx->ax; break;
5617 case 4: u32Value = pCtx->eax; break;
5618 default: AssertFailedReturn(VERR_IEM_IPE_4);
5619 }
5620 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
5621 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
5622 else
5623 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
5624 if (IOM_SUCCESS(rcStrict))
5625 {
5626 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5627 pIemCpu->cPotentialExits++;
5628 if (rcStrict != VINF_SUCCESS)
5629 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
5630 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
5631
5632 /*
5633 * Check for I/O breakpoints.
5634 */
5635 uint32_t const uDr7 = pCtx->dr[7];
5636 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
5637 && X86_DR7_ANY_RW_IO(uDr7)
5638 && (pCtx->cr4 & X86_CR4_DE))
5639 || DBGFBpIsHwIoArmed(IEMCPU_TO_VM(pIemCpu))))
5640 {
5641 rcStrict = DBGFBpCheckIo(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);
5642 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
5643 rcStrict = iemRaiseDebugException(pIemCpu);
5644 }
5645 }
5646 return rcStrict;
5647}
5648
5649
5650/**
5651 * Implements 'OUT DX, eAX'.
5652 *
5653 * @param cbReg The register size.
5654 */
5655IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
5656{
5657 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
5658}
5659
5660
5661/**
5662 * Implements 'CLI'.
5663 */
5664IEM_CIMPL_DEF_0(iemCImpl_cli)
5665{
5666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5667 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5668 uint32_t const fEflOld = fEfl;
5669 if (pCtx->cr0 & X86_CR0_PE)
5670 {
5671 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5672 if (!(fEfl & X86_EFL_VM))
5673 {
5674 if (pIemCpu->uCpl <= uIopl)
5675 fEfl &= ~X86_EFL_IF;
5676 else if ( pIemCpu->uCpl == 3
5677 && (pCtx->cr4 & X86_CR4_PVI) )
5678 fEfl &= ~X86_EFL_VIF;
5679 else
5680 return iemRaiseGeneralProtectionFault0(pIemCpu);
5681 }
5682 /* V8086 */
5683 else if (uIopl == 3)
5684 fEfl &= ~X86_EFL_IF;
5685 else if ( uIopl < 3
5686 && (pCtx->cr4 & X86_CR4_VME) )
5687 fEfl &= ~X86_EFL_VIF;
5688 else
5689 return iemRaiseGeneralProtectionFault0(pIemCpu);
5690 }
5691 /* real mode */
5692 else
5693 fEfl &= ~X86_EFL_IF;
5694
5695 /* Commit. */
5696 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5697 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5698 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
5699 return VINF_SUCCESS;
5700}
5701
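/*
 * Editor's illustrative sketch (not part of IEM): the CLI privilege decision
 * implemented above, reduced to a small classifier.  Enum and function names
 * are made up; STI follows the same structure with an additional EFLAGS.VIP
 * test on the VIF paths.
 *
 *      typedef enum { EX_CLEAR_IF, EX_CLEAR_VIF, EX_RAISE_GP } EXCLIACTION;
 *
 *      static EXCLIACTION exampleCliAction(bool fProtMode, bool fV86, unsigned uCpl,
 *                                          unsigned uIopl, bool fVme, bool fPvi)
 *      {
 *          if (!fProtMode)
 *              return EX_CLEAR_IF;                 // real mode: always allowed
 *          if (!fV86)
 *          {
 *              if (uCpl <= uIopl)
 *                  return EX_CLEAR_IF;
 *              if (uCpl == 3 && fPvi)
 *                  return EX_CLEAR_VIF;            // protected-mode virtual interrupts
 *              return EX_RAISE_GP;
 *          }
 *          if (uIopl == 3)
 *              return EX_CLEAR_IF;
 *          if (fVme)
 *              return EX_CLEAR_VIF;                // V8086 mode extensions
 *          return EX_RAISE_GP;
 *      }
 *
 * E.g. CPL=3, IOPL=0, CR4.PVI=0 yields EX_RAISE_GP, matching the #GP(0) path above.
 */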
5702
5703/**
5704 * Implements 'STI'.
5705 */
5706IEM_CIMPL_DEF_0(iemCImpl_sti)
5707{
5708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5709 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
5710 uint32_t const fEflOld = fEfl;
5711
5712 if (pCtx->cr0 & X86_CR0_PE)
5713 {
5714 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
5715 if (!(fEfl & X86_EFL_VM))
5716 {
5717 if (pIemCpu->uCpl <= uIopl)
5718 fEfl |= X86_EFL_IF;
5719 else if ( pIemCpu->uCpl == 3
5720 && (pCtx->cr4 & X86_CR4_PVI)
5721 && !(fEfl & X86_EFL_VIP) )
5722 fEfl |= X86_EFL_VIF;
5723 else
5724 return iemRaiseGeneralProtectionFault0(pIemCpu);
5725 }
5726 /* V8086 */
5727 else if (uIopl == 3)
5728 fEfl |= X86_EFL_IF;
5729 else if ( uIopl < 3
5730 && (pCtx->cr4 & X86_CR4_VME)
5731 && !(fEfl & X86_EFL_VIP) )
5732 fEfl |= X86_EFL_VIF;
5733 else
5734 return iemRaiseGeneralProtectionFault0(pIemCpu);
5735 }
5736 /* real mode */
5737 else
5738 fEfl |= X86_EFL_IF;
5739
5740 /* Commit. */
5741 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
5742 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5743 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
5744 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
5745 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
5746 return VINF_SUCCESS;
5747}
5748
5749
5750/**
5751 * Implements 'HLT'.
5752 */
5753IEM_CIMPL_DEF_0(iemCImpl_hlt)
5754{
5755 if (pIemCpu->uCpl != 0)
5756 return iemRaiseGeneralProtectionFault0(pIemCpu);
5757 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5758 return VINF_EM_HALT;
5759}
5760
5761
5762/**
5763 * Implements 'MONITOR'.
5764 */
5765IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
5766{
5767 /*
5768 * Permission checks.
5769 */
5770 if (pIemCpu->uCpl != 0)
5771 {
5772 Log2(("monitor: CPL != 0\n"));
5773 return iemRaiseUndefinedOpcode(pIemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
5774 }
5775 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5776 {
5777 Log2(("monitor: Not in CPUID\n"));
5778 return iemRaiseUndefinedOpcode(pIemCpu);
5779 }
5780
5781 /*
5782 * Gather the operands and validate them.
5783 */
5784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5785 RTGCPTR GCPtrMem = pIemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
5786 uint32_t uEcx = pCtx->ecx;
5787 uint32_t uEdx = pCtx->edx;
5788/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
5789 * \#GP first. */
5790 if (uEcx != 0)
5791 {
5792 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
5793 return iemRaiseGeneralProtectionFault0(pIemCpu);
5794 }
5795
5796 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
5797 if (rcStrict != VINF_SUCCESS)
5798 return rcStrict;
5799
5800 RTGCPHYS GCPhysMem;
5801 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
5802 if (rcStrict != VINF_SUCCESS)
5803 return rcStrict;
5804
5805 /*
5806 * Call EM to prepare the monitor/wait.
5807 */
5808 rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
5809 Assert(rcStrict == VINF_SUCCESS);
5810
5811 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5812 return rcStrict;
5813}
5814
5815
5816/**
5817 * Implements 'MWAIT'.
5818 */
5819IEM_CIMPL_DEF_0(iemCImpl_mwait)
5820{
5821 /*
5822 * Permission checks.
5823 */
5824 if (pIemCpu->uCpl != 0)
5825 {
5826 Log2(("mwait: CPL != 0\n"));
5827 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
5828 * EFLAGS.VM then.) */
5829 return iemRaiseUndefinedOpcode(pIemCpu);
5830 }
5831 if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMonitorMWait)
5832 {
5833 Log2(("mwait: Not in CPUID\n"));
5834 return iemRaiseUndefinedOpcode(pIemCpu);
5835 }
5836
5837 /*
5838 * Gather the operands and validate them.
5839 */
5840 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5841 uint32_t uEax = pCtx->eax;
5842 uint32_t uEcx = pCtx->ecx;
5843 if (uEcx != 0)
5844 {
5845 /* Only supported extension is break on IRQ when IF=0. */
5846 if (uEcx > 1)
5847 {
5848 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
5849 return iemRaiseGeneralProtectionFault0(pIemCpu);
5850 }
5851 uint32_t fMWaitFeatures = 0;
5852 uint32_t uIgnore = 0;
5853 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
5854 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5855 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
5856 {
5857 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
5858 return iemRaiseGeneralProtectionFault0(pIemCpu);
5859 }
5860 }
5861
5862 /*
5863 * Call EM to prepare the monitor/wait.
5864 */
5865 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);
5866
5867 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5868 return rcStrict;
5869}
5870
5871
5872/**
5873 * Implements 'SWAPGS'.
5874 */
5875IEM_CIMPL_DEF_0(iemCImpl_swapgs)
5876{
5877 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
5878
5879 /*
5880 * Permission checks.
5881 */
5882 if (pIemCpu->uCpl != 0)
5883 {
5884 Log2(("swapgs: CPL != 0\n"));
5885 return iemRaiseUndefinedOpcode(pIemCpu);
5886 }
5887
5888 /*
5889 * Do the job.
5890 */
5891 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5892 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
5893 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
5894 pCtx->gs.u64Base = uOtherGsBase;
5895
5896 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5897 return VINF_SUCCESS;
5898}
5899
5900
5901/**
5902 * Implements 'CPUID'.
5903 */
5904IEM_CIMPL_DEF_0(iemCImpl_cpuid)
5905{
5906 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5907
5908 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
5909 pCtx->rax &= UINT32_C(0xffffffff);
5910 pCtx->rbx &= UINT32_C(0xffffffff);
5911 pCtx->rcx &= UINT32_C(0xffffffff);
5912 pCtx->rdx &= UINT32_C(0xffffffff);
5913
5914 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5915 return VINF_SUCCESS;
5916}
5917
5918
5919/**
5920 * Implements 'AAD'.
5921 *
5922 * @param bImm The immediate operand.
5923 */
5924IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
5925{
5926 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5927
5928 uint16_t const ax = pCtx->ax;
5929 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
5930 pCtx->ax = al;
5931 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5932 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5933 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5934
5935 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5936 return VINF_SUCCESS;
5937}
5938
5939
5940/**
5941 * Implements 'AAM'.
5942 *
5943 * @param bImm The immediate operand. Cannot be 0.
5944 */
5945IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
5946{
5947 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5948 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
5949
5950 uint16_t const ax = pCtx->ax;
5951 uint8_t const al = (uint8_t)ax % bImm;
5952 uint8_t const ah = (uint8_t)ax / bImm;
5953 pCtx->ax = (ah << 8) + al;
5954 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
5955 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
5956 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
5957
5958 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5959 return VINF_SUCCESS;
5960}
5961
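/*
 * Editor's illustrative sketch (not part of IEM): the unpacked-BCD arithmetic
 * done by AAD and AAM above, with a worked case.  Function names are made up.
 *
 *      static uint16_t exampleAam(uint8_t uAl, uint8_t bImm)   // bImm != 0
 *      {
 *          uint8_t const uQuot = uAl / bImm;   // -> AH
 *          uint8_t const uRem  = uAl % bImm;   // -> AL
 *          return (uint16_t)((uQuot << 8) | uRem);
 *      }
 *
 *      static uint16_t exampleAad(uint16_t uAx, uint8_t bImm)
 *      {
 *          uint8_t const uAl = (uint8_t)((uint8_t)uAx + (uint8_t)(uAx >> 8) * bImm);
 *          return uAl;                         // AH ends up zero
 *      }
 *
 * With the default immediate 10: AAM on AL=0x2F (47) gives AX=0x0407, and AAD
 * on AX=0x0407 gives AX=0x002F again.  Non-decimal immediates work the same
 * way, which is occasionally used as a cheap divide/multiply by a constant.
 */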
5962
5963/**
5964 * Implements 'DAA'.
5965 */
5966IEM_CIMPL_DEF_0(iemCImpl_daa)
5967{
5968 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
5969
5970 uint8_t const al = pCtx->al;
5971 bool const fCarry = pCtx->eflags.Bits.u1CF;
5972
5973 if ( pCtx->eflags.Bits.u1AF
5974 || (al & 0xf) >= 10)
5975 {
5976 pCtx->al = al + 6;
5977 pCtx->eflags.Bits.u1AF = 1;
5978 }
5979 else
5980 pCtx->eflags.Bits.u1AF = 0;
5981
5982 if (al >= 0x9a || fCarry)
5983 {
5984 pCtx->al += 0x60;
5985 pCtx->eflags.Bits.u1CF = 1;
5986 }
5987 else
5988 pCtx->eflags.Bits.u1CF = 0;
5989
5990 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
5991 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
5992 return VINF_SUCCESS;
5993}
5994
5995
5996/**
5997 * Implements 'DAS'.
5998 */
5999IEM_CIMPL_DEF_0(iemCImpl_das)
6000{
6001 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6002
6003 uint8_t const uInputAL = pCtx->al;
6004 bool const fCarry = pCtx->eflags.Bits.u1CF;
6005
6006 if ( pCtx->eflags.Bits.u1AF
6007 || (uInputAL & 0xf) >= 10)
6008 {
6009 pCtx->eflags.Bits.u1AF = 1;
6010 if (uInputAL < 6)
6011 pCtx->eflags.Bits.u1CF = 1;
6012 pCtx->al = uInputAL - 6;
6013 }
6014 else
6015 {
6016 pCtx->eflags.Bits.u1AF = 0;
6017 pCtx->eflags.Bits.u1CF = 0;
6018 }
6019
6020 if (uInputAL >= 0x9a || fCarry)
6021 {
6022 pCtx->al -= 0x60;
6023 pCtx->eflags.Bits.u1CF = 1;
6024 }
6025
6026 iemHlpUpdateArithEFlagsU8(pIemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6027 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6028 return VINF_SUCCESS;
6029}
6030
6031
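/*
 * Editor's illustrative sketch (not part of IEM): the packed-BCD adjustment
 * DAA performs above, with a worked addition.  The helper mirrors the AL/AF/CF
 * handling of the code above; names are made up.
 *
 *      static uint8_t exampleDaa(uint8_t uAl, bool *pfAf, bool *pfCf)
 *      {
 *          uint8_t const uOldAl = uAl;
 *          bool const    fOldCf = *pfCf;
 *          if (*pfAf || (uAl & 0xf) >= 10)
 *          {
 *              uAl   = (uint8_t)(uAl + 6);
 *              *pfAf = true;
 *          }
 *          else
 *              *pfAf = false;
 *          if (uOldAl >= 0x9a || fOldCf)
 *          {
 *              uAl   = (uint8_t)(uAl + 0x60);
 *              *pfCf = true;
 *          }
 *          else
 *              *pfCf = false;
 *          return uAl;
 *      }
 *
 * Worked case: 0x29 + 0x15 = 0x3E with AF=CF=0; DAA sees a low nibble of 0xE,
 * adds 6 and sets AF, giving AL=0x44, the packed-BCD result of 29 + 15 = 44.
 */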
6032
6033
6034/*
6035 * Instantiate the various string operation combinations.
6036 */
6037#define OP_SIZE 8
6038#define ADDR_SIZE 16
6039#include "IEMAllCImplStrInstr.cpp.h"
6040#define OP_SIZE 8
6041#define ADDR_SIZE 32
6042#include "IEMAllCImplStrInstr.cpp.h"
6043#define OP_SIZE 8
6044#define ADDR_SIZE 64
6045#include "IEMAllCImplStrInstr.cpp.h"
6046
6047#define OP_SIZE 16
6048#define ADDR_SIZE 16
6049#include "IEMAllCImplStrInstr.cpp.h"
6050#define OP_SIZE 16
6051#define ADDR_SIZE 32
6052#include "IEMAllCImplStrInstr.cpp.h"
6053#define OP_SIZE 16
6054#define ADDR_SIZE 64
6055#include "IEMAllCImplStrInstr.cpp.h"
6056
6057#define OP_SIZE 32
6058#define ADDR_SIZE 16
6059#include "IEMAllCImplStrInstr.cpp.h"
6060#define OP_SIZE 32
6061#define ADDR_SIZE 32
6062#include "IEMAllCImplStrInstr.cpp.h"
6063#define OP_SIZE 32
6064#define ADDR_SIZE 64
6065#include "IEMAllCImplStrInstr.cpp.h"
6066
6067#define OP_SIZE 64
6068#define ADDR_SIZE 32
6069#include "IEMAllCImplStrInstr.cpp.h"
6070#define OP_SIZE 64
6071#define ADDR_SIZE 64
6072#include "IEMAllCImplStrInstr.cpp.h"
6073
6074
6075/**
6076 * Implements 'XGETBV'.
6077 */
6078IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
6079{
6080 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6081 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6082 {
6083 uint32_t uEcx = pCtx->ecx;
6084 switch (uEcx)
6085 {
6086 case 0:
6087 break;
6088
6089 case 1: /** @todo Implement XCR1 support. */
6090 default:
6091 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
6092 return iemRaiseGeneralProtectionFault0(pIemCpu);
6093
6094 }
6095 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
6096 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
6097
6098 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6099 return VINF_SUCCESS;
6100 }
6101 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
6102 return iemRaiseUndefinedOpcode(pIemCpu);
6103}
6104
6105
6106/**
6107 * Implements 'XSETBV'.
6108 */
6109IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
6110{
6111 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6112 if (pCtx->cr4 & X86_CR4_OSXSAVE)
6113 {
6114 if (pIemCpu->uCpl == 0)
6115 {
6116 uint32_t uEcx = pCtx->ecx;
6117 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
6118 switch (uEcx)
6119 {
6120 case 0:
6121 {
6122 int rc = CPUMSetGuestXcr0(IEMCPU_TO_VMCPU(pIemCpu), uNewValue);
6123 if (rc == VINF_SUCCESS)
6124 break;
6125 Assert(rc == VERR_CPUM_RAISE_GP_0);
6126 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6127 return iemRaiseGeneralProtectionFault0(pIemCpu);
6128 }
6129
6130 case 1: /** @todo Implement XCR1 support. */
6131 default:
6132 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
6133 return iemRaiseGeneralProtectionFault0(pIemCpu);
6134
6135 }
6136
6137 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6138 return VINF_SUCCESS;
6139 }
6140
6141 Log(("xsetbv cpl=%u -> #GP(0)\n", pIemCpu->uCpl));
6142 return iemRaiseGeneralProtectionFault0(pIemCpu);
6143 }
6144 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
6145 return iemRaiseUndefinedOpcode(pIemCpu);
6146}
6147
6148
6149
6150/**
6151 * Implements 'FINIT' and 'FNINIT'.
6152 *
6153 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
6154 * not.
6155 */
6156IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
6157{
6158 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6159
6160 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6161 return iemRaiseDeviceNotAvailable(pIemCpu);
6162
6163 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
6164 if (fCheckXcpts && TODO )
6165 return iemRaiseMathFault(pIemCpu);
6166 */
6167
6168 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
6169 pXState->x87.FCW = 0x37f;
6170 pXState->x87.FSW = 0;
6171 pXState->x87.FTW = 0x00; /* 0 - empty. */
6172 pXState->x87.FPUDP = 0;
6173 pXState->x87.DS = 0; //??
6174 pXState->x87.Rsrvd2= 0;
6175 pXState->x87.FPUIP = 0;
6176 pXState->x87.CS = 0; //??
6177 pXState->x87.Rsrvd1= 0;
6178 pXState->x87.FOP = 0;
6179
6180 iemHlpUsedFpu(pIemCpu);
6181 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6182 return VINF_SUCCESS;
6183}
6184
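/*
 * Editor's illustrative sketch (not part of IEM): what the FNINIT control word
 * value FCW=0x37f set above decodes to, per the architectural x87 FCW layout.
 * The function name is made up.
 *
 *      static void exampleDecodeFcw037f(void)
 *      {
 *          uint16_t const fcw        = 0x37f;
 *          unsigned const fXcptMasks = fcw & 0x3f;        // IM,DM,ZM,OM,UM,PM all set (masked)
 *          unsigned const uPrecision = (fcw >> 8) & 0x3;  // 11b = 64-bit (extended) precision
 *          unsigned const uRounding  = (fcw >> 10) & 0x3; // 00b = round to nearest (even)
 *          (void)fXcptMasks; (void)uPrecision; (void)uRounding;
 *      }
 *
 * I.e. after FNINIT all x87 exceptions are masked, precision control is
 * extended and rounding is to nearest, matching the reset state the code
 * above establishes.
 */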
6185
6186/**
6187 * Implements 'FXSAVE'.
6188 *
6189 * @param iEffSeg The effective segment.
6190 * @param GCPtrEff The address of the image.
6191 * @param enmEffOpSize The operand size (only REX.W really matters).
6192 */
6193IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6194{
6195 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6196
6197 /*
6198 * Raise exceptions.
6199 */
6200 if (pCtx->cr0 & X86_CR0_EM)
6201 return iemRaiseUndefinedOpcode(pIemCpu);
6202 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6203 return iemRaiseDeviceNotAvailable(pIemCpu);
6204 if (GCPtrEff & 15)
6205 {
6206 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6207 * all/any misalignment sizes; Intel says it's an implementation detail. */
6208 if ( (pCtx->cr0 & X86_CR0_AM)
6209 && pCtx->eflags.Bits.u1AC
6210 && pIemCpu->uCpl == 3)
6211 return iemRaiseAlignmentCheckException(pIemCpu);
6212 return iemRaiseGeneralProtectionFault0(pIemCpu);
6213 }
6214
6215 /*
6216 * Access the memory.
6217 */
6218 void *pvMem512;
6219 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6220 if (rcStrict != VINF_SUCCESS)
6221 return rcStrict;
6222 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
6223 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
6224
6225 /*
6226 * Store the registers.
6227 */
6228 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6229 * specific whether MXCSR and XMM0-XMM7 are saved. */
6230
6231 /* common for all formats */
6232 pDst->FCW = pSrc->FCW;
6233 pDst->FSW = pSrc->FSW;
6234 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6235 pDst->FOP = pSrc->FOP;
6236 pDst->MXCSR = pSrc->MXCSR;
6237 pDst->MXCSR_MASK = pSrc->MXCSR_MASK;
6238 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
6239 {
6240 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
6241 * them for now... */
6242 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6243 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6244 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6245 pDst->aRegs[i].au32[3] = 0;
6246 }
6247
6248 /* FPU IP, CS, DP and DS. */
6249 pDst->FPUIP = pSrc->FPUIP;
6250 pDst->CS = pSrc->CS;
6251 pDst->FPUDP = pSrc->FPUDP;
6252 pDst->DS = pSrc->DS;
6253 if (enmEffOpSize == IEMMODE_64BIT)
6254 {
6255 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
6256 pDst->Rsrvd1 = pSrc->Rsrvd1;
6257 pDst->Rsrvd2 = pSrc->Rsrvd2;
6258 pDst->au32RsrvdForSoftware[0] = 0;
6259 }
6260 else
6261 {
6262 pDst->Rsrvd1 = 0;
6263 pDst->Rsrvd2 = 0;
6264 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
6265 }
6266
6267 /* XMM registers. */
6268 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6269 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6270 || pIemCpu->uCpl != 0)
6271 {
6272 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6273 for (uint32_t i = 0; i < cXmmRegs; i++)
6274 pDst->aXMM[i] = pSrc->aXMM[i];
6275 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
6276 * right? */
6277 }
6278
6279 /*
6280 * Commit the memory.
6281 */
6282 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6283 if (rcStrict != VINF_SUCCESS)
6284 return rcStrict;
6285
6286 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6287 return VINF_SUCCESS;
6288}
6289
6290
6291/**
6292 * Implements 'FXRSTOR'.
6293 *
6294 * @param GCPtrEff The address of the image.
6295 * @param enmEffOpSize The operand size (only REX.W really matters).
6296 */
6297IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
6298{
6299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6300
6301 /*
6302 * Raise exceptions.
6303 */
6304 if (pCtx->cr0 & X86_CR0_EM)
6305 return iemRaiseUndefinedOpcode(pIemCpu);
6306 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
6307 return iemRaiseDeviceNotAvailable(pIemCpu);
6308 if (GCPtrEff & 15)
6309 {
6310 /** @todo CPU/VM detection possible! \#AC might not be signalled for
6311 * all/any misalignment sizes; Intel says it's an implementation detail. */
6312 if ( (pCtx->cr0 & X86_CR0_AM)
6313 && pCtx->eflags.Bits.u1AC
6314 && pIemCpu->uCpl == 3)
6315 return iemRaiseAlignmentCheckException(pIemCpu);
6316 return iemRaiseGeneralProtectionFault0(pIemCpu);
6317 }
6318
6319 /*
6320 * Access the memory.
6321 */
6322 void *pvMem512;
6323 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
6324 if (rcStrict != VINF_SUCCESS)
6325 return rcStrict;
6326 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
6327 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
6328
6329 /*
6330 * Check the state for stuff which will #GP(0).
6331 */
6332 uint32_t const fMXCSR = pSrc->MXCSR;
6333 uint32_t const fMXCSR_MASK = pDst->MXCSR_MASK ? pDst->MXCSR_MASK : UINT32_C(0xffbf);
6334 if (fMXCSR & ~fMXCSR_MASK)
6335 {
6336 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
6337 return iemRaiseGeneralProtectionFault0(pIemCpu);
6338 }
6339
6340 /*
6341 * Load the registers.
6342 */
6343 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
6344 * specific whether MXCSR and XMM0-XMM7 are restored. */
6345
6346 /* common for all formats */
6347 pDst->FCW = pSrc->FCW;
6348 pDst->FSW = pSrc->FSW;
6349 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
6350 pDst->FOP = pSrc->FOP;
6351 pDst->MXCSR = fMXCSR;
6352 /* (MXCSR_MASK is read-only) */
6353 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
6354 {
6355 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
6356 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
6357 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
6358 pDst->aRegs[i].au32[3] = 0;
6359 }
6360
6361 /* FPU IP, CS, DP and DS. */
6362 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
6363 {
6364 pDst->FPUIP = pSrc->FPUIP;
6365 pDst->CS = pSrc->CS;
6366 pDst->Rsrvd1 = pSrc->Rsrvd1;
6367 pDst->FPUDP = pSrc->FPUDP;
6368 pDst->DS = pSrc->DS;
6369 pDst->Rsrvd2 = pSrc->Rsrvd2;
6370 }
6371 else
6372 {
6373 pDst->FPUIP = pSrc->FPUIP;
6374 pDst->CS = pSrc->CS;
6375 pDst->Rsrvd1 = 0;
6376 pDst->FPUDP = pSrc->FPUDP;
6377 pDst->DS = pSrc->DS;
6378 pDst->Rsrvd2 = 0;
6379 }
6380
6381 /* XMM registers. */
6382 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
6383 || pIemCpu->enmCpuMode != IEMMODE_64BIT
6384 || pIemCpu->uCpl != 0)
6385 {
6386 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
6387 for (uint32_t i = 0; i < cXmmRegs; i++)
6388 pDst->aXMM[i] = pSrc->aXMM[i];
6389 }
6390
6391 /*
6392 * Commit the memory.
6393 */
6394 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
6395 if (rcStrict != VINF_SUCCESS)
6396 return rcStrict;
6397
6398 iemHlpUsedFpu(pIemCpu);
6399 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6400 return VINF_SUCCESS;
6401}
6402
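/*
 * Editor's illustrative sketch (not part of IEM): the MXCSR reserved-bit check
 * FXRSTOR performs above.  A zero MXCSR_MASK (none recorded) means the default
 * mask 0xffbf applies; the function name is made up.
 *
 *      static bool exampleIsMxcsrLoadable(uint32_t uMxcsr, uint32_t uMxcsrMask)
 *      {
 *          uint32_t const fMask = uMxcsrMask ? uMxcsrMask : UINT32_C(0xffbf);
 *          return (uMxcsr & ~fMask) == 0;   // any reserved bit set -> #GP(0)
 *      }
 *
 * So loading MXCSR=0x1f80 (the power-on value) always succeeds, while e.g.
 * MXCSR=0x10000 fails against the default mask and would raise #GP(0).
 */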
6403
6404/**
6405 * Common routine for fnstenv and fnsave.
6406 *
6407 * @param uPtr Where to store the state.
6408 * @param pCtx The CPU context.
6409 */
6410static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
6411{
6412 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
6413 if (enmEffOpSize == IEMMODE_16BIT)
6414 {
6415 uPtr.pu16[0] = pSrcX87->FCW;
6416 uPtr.pu16[1] = pSrcX87->FSW;
6417 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
6418 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6419 {
6420 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
6421 * protected mode or long mode and we save it in real mode? And vice
6422 * versa? And with 32-bit operand size? I think the CPU stores the
6423 * effective address ((CS << 4) + IP) in the offset field and does
6424 * no further address calculation here. */
6425 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
6426 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
6427 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
6428 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
6429 }
6430 else
6431 {
6432 uPtr.pu16[3] = pSrcX87->FPUIP;
6433 uPtr.pu16[4] = pSrcX87->CS;
6434 uPtr.pu16[5] = pSrcX87->FPUDP;
6435 uPtr.pu16[6] = pSrcX87->DS;
6436 }
6437 }
6438 else
6439 {
6440 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
6441 uPtr.pu16[0*2] = pSrcX87->FCW;
6442 uPtr.pu16[1*2] = pSrcX87->FSW;
6443 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
6444 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6445 {
6446 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
6447 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
6448 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
6449 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
6450 }
6451 else
6452 {
6453 uPtr.pu32[3] = pSrcX87->FPUIP;
6454 uPtr.pu16[4*2] = pSrcX87->CS;
6455 uPtr.pu16[4*2+1]= pSrcX87->FOP;
6456 uPtr.pu32[5] = pSrcX87->FPUDP;
6457 uPtr.pu16[6*2] = pSrcX87->DS;
6458 }
6459 }
6460}
6461
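/*
 * Editor's illustrative sketch (not part of IEM): how the 16-bit real-mode
 * environment layout above packs the 20-bit FPUIP linear address
 * ((CS << 4) + IP) into two words, and how the restore routine below unpacks
 * it again.  Names are made up for the example.
 *
 *      static void examplePackFpuIp(uint32_t uFpuIp, uint16_t uFop, uint16_t *pu16W3, uint16_t *pu16W4)
 *      {
 *          *pu16W3 = (uint16_t)uFpuIp;                              // address bits 15:0
 *          *pu16W4 = (uint16_t)(((uFpuIp >> 4) & 0xf000) | uFop);   // bits 19:16 in 15:12, FOP below
 *      }
 *
 *      static uint32_t exampleUnpackFpuIp(uint16_t u16W3, uint16_t u16W4)
 *      {
 *          return u16W3 | ((uint32_t)(u16W4 & 0xf000) << 4);        // recreate the 20-bit address
 *      }
 *
 * E.g. uFpuIp=0x9a123 with uFop=0x5ee packs into the words 0xa123 and 0x95ee,
 * and unpacks back to 0x9a123.
 */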
6462
6463/**
6464 * Common routine for fldenv and frstor.
6465 *
6466 * @param uPtr Where to load the state from.
6467 * @param pCtx The CPU context.
6468 */
6469static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
6470{
6471 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
6472 if (enmEffOpSize == IEMMODE_16BIT)
6473 {
6474 pDstX87->FCW = uPtr.pu16[0];
6475 pDstX87->FSW = uPtr.pu16[1];
6476 pDstX87->FTW = uPtr.pu16[2];
6477 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6478 {
6479 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
6480 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
6481 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
6482 pDstX87->CS = 0;
6483 pDstX87->Rsrvd1= 0;
6484 pDstX87->DS = 0;
6485 pDstX87->Rsrvd2= 0;
6486 }
6487 else
6488 {
6489 pDstX87->FPUIP = uPtr.pu16[3];
6490 pDstX87->CS = uPtr.pu16[4];
6491 pDstX87->Rsrvd1= 0;
6492 pDstX87->FPUDP = uPtr.pu16[5];
6493 pDstX87->DS = uPtr.pu16[6];
6494 pDstX87->Rsrvd2= 0;
6495 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
6496 }
6497 }
6498 else
6499 {
6500 pDstX87->FCW = uPtr.pu16[0*2];
6501 pDstX87->FSW = uPtr.pu16[1*2];
6502 pDstX87->FTW = uPtr.pu16[2*2];
6503 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
6504 {
6505 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
6506 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
6507 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
6508 pDstX87->CS = 0;
6509 pDstX87->Rsrvd1= 0;
6510 pDstX87->DS = 0;
6511 pDstX87->Rsrvd2= 0;
6512 }
6513 else
6514 {
6515 pDstX87->FPUIP = uPtr.pu32[3];
6516 pDstX87->CS = uPtr.pu16[4*2];
6517 pDstX87->Rsrvd1= 0;
6518 pDstX87->FOP = uPtr.pu16[4*2+1];
6519 pDstX87->FPUDP = uPtr.pu32[5];
6520 pDstX87->DS = uPtr.pu16[6*2];
6521 pDstX87->Rsrvd2= 0;
6522 }
6523 }
6524
6525 /* Make adjustments. */
6526 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
6527 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
6528 iemFpuRecalcExceptionStatus(pDstX87);
6529 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
6530 * exceptions are pending after loading the saved state? */
6531}
6532
6533
6534/**
6535 * Implements 'FNSTENV'.
6536 *
6537 * @param enmEffOpSize The operand size (only REX.W really matters).
6538 * @param iEffSeg The effective segment register for @a GCPtrEff.
6539 * @param GCPtrEffDst The address of the image.
6540 */
6541IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6542{
6543 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6544 RTPTRUNION uPtr;
6545 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6546 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6547 if (rcStrict != VINF_SUCCESS)
6548 return rcStrict;
6549
6550 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6551
6552 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6553 if (rcStrict != VINF_SUCCESS)
6554 return rcStrict;
6555
6556 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6557 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6558 return VINF_SUCCESS;
6559}
6560
6561
6562/**
6563 * Implements 'FNSAVE'.
6564 *
6565 * @param GCPtrEffDst The address of the image.
6566 * @param enmEffOpSize The operand size.
6567 */
6568IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
6569{
6570 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6571 RTPTRUNION uPtr;
6572 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6573 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6574 if (rcStrict != VINF_SUCCESS)
6575 return rcStrict;
6576
6577 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6578 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6579 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6580 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6581 {
6582 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
6583 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
6584 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
6585 }
6586
6587 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
6588 if (rcStrict != VINF_SUCCESS)
6589 return rcStrict;
6590
6591 /*
6592 * Re-initialize the FPU context.
6593 */
6594 pFpuCtx->FCW = 0x37f;
6595 pFpuCtx->FSW = 0;
6596 pFpuCtx->FTW = 0x00; /* 0 - empty */
6597 pFpuCtx->FPUDP = 0;
6598 pFpuCtx->DS = 0;
6599 pFpuCtx->Rsrvd2= 0;
6600 pFpuCtx->FPUIP = 0;
6601 pFpuCtx->CS = 0;
6602 pFpuCtx->Rsrvd1= 0;
6603 pFpuCtx->FOP = 0;
6604
6605 iemHlpUsedFpu(pIemCpu);
6606 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6607 return VINF_SUCCESS;
6608}
6609
6610
6611
6612/**
6613 * Implements 'FLDENV'.
6614 *
6615 * @param enmEffOpSize The operand size (only REX.W really matters).
6616 * @param iEffSeg The effective segment register for @a GCPtrEff.
6617 * @param GCPtrEffSrc The address of the image.
6618 */
6619IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6620{
6621 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6622 RTCPTRUNION uPtr;
6623 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
6624 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6625 if (rcStrict != VINF_SUCCESS)
6626 return rcStrict;
6627
6628 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6629
6630 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6631 if (rcStrict != VINF_SUCCESS)
6632 return rcStrict;
6633
6634 iemHlpUsedFpu(pIemCpu);
6635 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6636 return VINF_SUCCESS;
6637}
6638
6639
6640/**
6641 * Implements 'FRSTOR'.
6642 *
6643 * @param GCPtrEffSrc The address of the image.
6644 * @param enmEffOpSize The operand size.
6645 */
6646IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
6647{
6648 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6649 RTCPTRUNION uPtr;
6650 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
6651 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
6652 if (rcStrict != VINF_SUCCESS)
6653 return rcStrict;
6654
6655 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6656 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
6657 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
6658 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
6659 {
6660 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
6661 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
6662 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
6663 pFpuCtx->aRegs[i].au32[3] = 0;
6664 }
6665
6666 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
6667 if (rcStrict != VINF_SUCCESS)
6668 return rcStrict;
6669
6670 iemHlpUsedFpu(pIemCpu);
6671 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6672 return VINF_SUCCESS;
6673}
6674
6675
6676/**
6677 * Implements 'FLDCW'.
6678 *
6679 * @param u16Fcw The new FCW.
6680 */
6681IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
6682{
6683 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6684
6685 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
6686 /** @todo Testcase: Try to see what happens when trying to set undefined bits
6687 * (other than 6 and 7). Currently ignoring them. */
6688 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
6689 * according to FSW. (This is what is currently implemented.) */
6690 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6691 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
6692 iemFpuRecalcExceptionStatus(pFpuCtx);
6693
6694 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
6695 iemHlpUsedFpu(pIemCpu);
6696 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6697 return VINF_SUCCESS;
6698}
6699
6700
6701
6702/**
6703 * Implements the underflow case of fxch.
6704 *
6705 * @param iStReg The other stack register.
6706 */
6707IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
6708{
6709 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6710
6711 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6712 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
6713 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6714 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
6715
6716 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
6717 * registers are read as QNaN and then exchanged. This could be
6718 * wrong... */
6719 if (pFpuCtx->FCW & X86_FCW_IM)
6720 {
6721 if (RT_BIT(iReg1) & pFpuCtx->FTW)
6722 {
6723 if (RT_BIT(iReg2) & pFpuCtx->FTW)
6724 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6725 else
6726 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
6727 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6728 }
6729 else
6730 {
6731 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
6732 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6733 }
6734 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6735 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
6736 }
6737 else
6738 {
6739 /* raise underflow exception, don't change anything. */
6740 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
6741 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6742 }
6743
6744 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6745 iemHlpUsedFpu(pIemCpu);
6746 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6747 return VINF_SUCCESS;
6748}
6749
6750
6751/**
6752 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
6753 *
6754 * @param iStReg The other stack register.
 * @param pfnAImpl The comparison worker to call (FCOMI vs FUCOMI variant).
 * @param fPop Whether to pop the register afterwards (FCOMIP/FUCOMIP).
6755 */
6756IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
6757{
6758 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
6759 Assert(iStReg < 8);
6760
6761 /*
6762 * Raise exceptions.
6763 */
6764 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
6765 return iemRaiseDeviceNotAvailable(pIemCpu);
6766
6767 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6768 uint16_t u16Fsw = pFpuCtx->FSW;
6769 if (u16Fsw & X86_FSW_ES)
6770 return iemRaiseMathFault(pIemCpu);
6771
6772 /*
6773 * Check if any of the register accesses causes #SF + #IA.
6774 */
6775 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
6776 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
6777 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
6778 {
6779 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
6780 NOREF(u32Eflags);
6781
6782 pFpuCtx->FSW &= ~X86_FSW_C1;
6783 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
6784 if ( !(u16Fsw & X86_FSW_IE)
6785 || (pFpuCtx->FCW & X86_FCW_IM) )
6786 {
6787 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6788 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6789 }
6790 }
6791 else if (pFpuCtx->FCW & X86_FCW_IM)
6792 {
6793 /* Masked underflow. */
6794 pFpuCtx->FSW &= ~X86_FSW_C1;
6795 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6796 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
6797 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
6798 }
6799 else
6800 {
6801 /* Raise underflow - don't touch EFLAGS or TOP. */
6802 pFpuCtx->FSW &= ~X86_FSW_C1;
6803 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6804 fPop = false;
6805 }
6806
6807 /*
6808 * Pop if necessary.
6809 */
6810 if (fPop)
6811 {
6812 pFpuCtx->FTW &= ~RT_BIT(iReg1);
6813 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
6814 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
6815 }
6816
6817 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx, pFpuCtx);
6818 iemHlpUsedFpu(pIemCpu);
6819 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
6820 return VINF_SUCCESS;
6821}
6822
6823/** @} */
6824