VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@67448

Last change on this file since 67448 was 67163, checked in by vboxsync, 8 years ago

VMM/IEM: Temporary hack for toggling forcing of execution to continue in IEM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 285.2 KB
1/* $Id: IEMAllCImpl.cpp.h 67163 2017-05-31 10:21:53Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @name Misc Helpers
19 * @{
20 */
21
22
23/**
24 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
25 *
26 * @returns Strict VBox status code.
27 *
28 * @param pVCpu The cross context virtual CPU structure of the calling thread.
29 * @param pCtx The register context.
30 * @param u16Port The port number.
31 * @param cbOperand The operand size.
32 */
33static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
34{
35 /* The TSS bits we're interested in are the same on 386 and AMD64. */
36 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
38 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
39 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
40
41 /*
42 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
43 */
44 Assert(!pCtx->tr.Attr.n.u1DescType);
45 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
46 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
47 {
48 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
49 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
50 return iemRaiseGeneralProtectionFault0(pVCpu);
51 }
52
53 /*
54 * Read the bitmap offset (may #PF).
55 */
56 uint16_t offBitmap;
57 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
58 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
59 if (rcStrict != VINF_SUCCESS)
60 {
61 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
62 return rcStrict;
63 }
64
65 /*
66 * We need the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
67 * describes the CPU as actually reading two bytes regardless of whether the
68 * bit range crosses a byte boundary. Thus the + 1 in the test below.
69 */
70 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
71 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
72 * for instance, sizeof(X86TSS32). */
73 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
74 {
75 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
76 offFirstBit, pCtx->tr.u32Limit));
77 return iemRaiseGeneralProtectionFault0(pVCpu);
78 }
79
80 /*
81 * Read the necessary bits.
82 */
83 /** @todo Test the assertion in the Intel manual that the CPU reads two
84 * bytes. The question is how this works wrt #PF and #GP on the
85 * 2nd byte when it's not required. */
86 uint16_t bmBytes = UINT16_MAX;
87 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
88 if (rcStrict != VINF_SUCCESS)
89 {
90 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
91 return rcStrict;
92 }
93
94 /*
95 * Perform the check.
96 */
97 uint16_t fPortMask = (1 << cbOperand) - 1;
98 bmBytes >>= (u16Port & 7);
99 if (bmBytes & fPortMask)
100 {
101 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
102 u16Port, cbOperand, bmBytes, fPortMask));
103 return iemRaiseGeneralProtectionFault0(pVCpu);
104 }
105
106 return VINF_SUCCESS;
107}
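
/*
 * A minimal standalone sketch of the bit test performed above: the two bytes
 * fetched at TR.base + offIoBitmap + u16Port / 8 are shifted so that the bit
 * for u16Port ends up in bit 0, and the access is denied if any of the
 * cbOperand consecutive bits is set.  The helper name is illustrative only
 * and not part of the IEM API.
 */
#if 0 /* illustrative sketch */
static bool iemSketchIoBitmapDeniesAccess(uint16_t bmBytes, uint16_t u16Port, uint8_t cbOperand)
{
    uint16_t const fPortMask = (uint16_t)((1 << cbOperand) - 1); /* 1, 3 or 15 for 1/2/4 byte accesses */
    return ((bmBytes >> (u16Port & 7)) & fPortMask) != 0;        /* any set bit denies the access */
}
#endif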
108
109
110/**
111 * Checks if we are allowed to access the given I/O port, raising the
112 * appropriate exceptions if we aren't (or if the I/O bitmap is not
113 * accessible).
114 *
115 * @returns Strict VBox status code.
116 *
117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
118 * @param pCtx The register context.
119 * @param u16Port The port number.
120 * @param cbOperand The operand size.
121 */
122DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
123{
124 X86EFLAGS Efl;
125 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
126 if ( (pCtx->cr0 & X86_CR0_PE)
127 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
128 || Efl.Bits.u1VM) )
129 return iemHlpCheckPortIOPermissionBitmap(pVCpu, pCtx, u16Port, cbOperand);
130 return VINF_SUCCESS;
131}
132
133
134#if 0
135/**
136 * Calculates the parity bit.
137 *
138 * @returns true if the bit is set, false if not.
139 * @param u8Result The least significant byte of the result.
140 */
141static bool iemHlpCalcParityFlag(uint8_t u8Result)
142{
143 /*
144 * Parity is set if the number of bits in the least significant byte of
145 * the result is even.
146 */
147 uint8_t cBits;
148 cBits = u8Result & 1; /* 0 */
149 u8Result >>= 1;
150 cBits += u8Result & 1;
151 u8Result >>= 1;
152 cBits += u8Result & 1;
153 u8Result >>= 1;
154 cBits += u8Result & 1;
155 u8Result >>= 1;
156 cBits += u8Result & 1; /* 4 */
157 u8Result >>= 1;
158 cBits += u8Result & 1;
159 u8Result >>= 1;
160 cBits += u8Result & 1;
161 u8Result >>= 1;
162 cBits += u8Result & 1;
163 return !(cBits & 1);
164}
165#endif /* not used */
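
/*
 * The same parity calculation can be written more compactly by XOR-folding the
 * byte; this is only an illustrative alternative to the unrolled version above
 * and is likewise unused.
 */
#if 0 /* illustrative sketch */
static bool iemSketchCalcParityFlag(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;  /* bit 0 is now the XOR of all eight result bits */
    return !(u8Result & 1);     /* PF is set when the number of set bits is even */
}
#endif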
166
167
168/**
169 * Updates the specified flags according to an 8-bit result.
170 *
171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
172 * @param u8Result The result to set the flags according to.
173 * @param fToUpdate The flags to update.
174 * @param fUndefined The flags that are specified as undefined.
175 */
176static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
177{
178 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
179
180 uint32_t fEFlags = pCtx->eflags.u;
181 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
182 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
183 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
184#ifdef IEM_VERIFICATION_MODE_FULL
185 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
186#endif
187}
188
189
190/**
191 * Updates the specified flags according to a 16-bit result.
192 *
193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
194 * @param u16Result The result to set the flags according to.
195 * @param fToUpdate The flags to update.
196 * @param fUndefined The flags that are specified as undefined.
197 */
198static void iemHlpUpdateArithEFlagsU16(PVMCPU pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
199{
200 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
201
202 uint32_t fEFlags = pCtx->eflags.u;
203 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
204 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
205 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
206#ifdef IEM_VERIFICATION_MODE_FULL
207 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
208#endif
209}
210
211
212/**
213 * Helper used by iret.
214 *
215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
216 * @param uCpl The new CPL.
217 * @param pSReg Pointer to the segment register.
218 */
219static void iemHlpAdjustSelectorForNewCpl(PVMCPU pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
220{
221#ifdef VBOX_WITH_RAW_MODE_NOT_R0
222 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
223 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
224#else
225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
226#endif
227
228 if ( uCpl > pSReg->Attr.n.u2Dpl
229 && pSReg->Attr.n.u1DescType /* code or data, not system */
230 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
231 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
232 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
233}
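
/*
 * The condition above can be read as: on a privilege-lowering iret, any data
 * or non-conforming code selector that the new CPL is not allowed to keep is
 * replaced by a NULL selector.  A standalone sketch of that predicate with the
 * attribute fields passed separately (the name is illustrative only):
 */
#if 0 /* illustrative sketch */
static bool iemSketchMustNullSelectorOnCplChange(uint8_t uNewCpl, uint8_t u2Dpl, uint8_t u1DescType, uint8_t u4Type)
{
    return uNewCpl > u2Dpl
        && u1DescType /* code or data, not system */
        && (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF);
}
#endif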
234
235
236/**
237 * Indicates that we have modified the FPU state.
238 *
239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
240 */
241DECLINLINE(void) iemHlpUsedFpu(PVMCPU pVCpu)
242{
243 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
244}
245
246/** @} */
247
248/** @name C Implementations
249 * @{
250 */
251
252/**
253 * Implements a 16-bit popa.
254 */
255IEM_CIMPL_DEF_0(iemCImpl_popa_16)
256{
257 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
258 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
259 RTGCPTR GCPtrLast = GCPtrStart + 15;
260 VBOXSTRICTRC rcStrict;
261
262 /*
263 * The docs are a bit hard to comprehend here, but it looks like we wrap
264 * around in real mode as long as none of the individual "popa" crosses the
265 * end of the stack segment. In protected mode we check the whole access
266 * in one go. For efficiency, only do the word-by-word thing if we're in
267 * danger of wrapping around.
268 */
269 /** @todo do popa boundary / wrap-around checks. */
270 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
271 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
272 {
273 /* word-by-word */
274 RTUINT64U TmpRsp;
275 TmpRsp.u = pCtx->rsp;
276 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->di, &TmpRsp);
277 if (rcStrict == VINF_SUCCESS)
278 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->si, &TmpRsp);
279 if (rcStrict == VINF_SUCCESS)
280 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bp, &TmpRsp);
281 if (rcStrict == VINF_SUCCESS)
282 {
283 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
284 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bx, &TmpRsp);
285 }
286 if (rcStrict == VINF_SUCCESS)
287 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->dx, &TmpRsp);
288 if (rcStrict == VINF_SUCCESS)
289 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->cx, &TmpRsp);
290 if (rcStrict == VINF_SUCCESS)
291 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->ax, &TmpRsp);
292 if (rcStrict == VINF_SUCCESS)
293 {
294 pCtx->rsp = TmpRsp.u;
295 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
296 }
297 }
298 else
299 {
300 uint16_t const *pa16Mem = NULL;
301 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
305 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
306 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
307 /* skip sp */
308 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
309 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
310 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
311 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
312 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
313 if (rcStrict == VINF_SUCCESS)
314 {
315 iemRegAddToRsp(pVCpu, pCtx, 16);
316 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
317 }
318 }
319 }
320 return rcStrict;
321}
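
/*
 * The "7 - X86_GREG_xXX" indexing above reflects the memory image left by
 * PUSHA: DI is stored at the lowest address and AX at the highest, while the
 * X86_GREG_xXX constants number the registers AX=0 .. DI=7.  A compile-time
 * sketch of that mapping (illustrative only):
 */
#if 0 /* illustrative sketch */
AssertCompile(X86_GREG_xAX == 0 && X86_GREG_xDI == 7);
/* pa16Mem[0] = DI, pa16Mem[1] = SI, ..., pa16Mem[6] = CX, pa16Mem[7] = AX */
#endif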
322
323
324/**
325 * Implements a 32-bit popa.
326 */
327IEM_CIMPL_DEF_0(iemCImpl_popa_32)
328{
329 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
330 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
331 RTGCPTR GCPtrLast = GCPtrStart + 31;
332 VBOXSTRICTRC rcStrict;
333
334 /*
335 * The docs are a bit hard to comprehend here, but it looks like we wrap
336 * around in real mode as long as none of the individual "popa" crosses the
337 * end of the stack segment. In protected mode we check the whole access
338 * in one go. For efficiency, only do the word-by-word thing if we're in
339 * danger of wrapping around.
340 */
341 /** @todo do popa boundary / wrap-around checks. */
342 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
343 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
344 {
345 /* word-by-word */
346 RTUINT64U TmpRsp;
347 TmpRsp.u = pCtx->rsp;
348 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edi, &TmpRsp);
349 if (rcStrict == VINF_SUCCESS)
350 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->esi, &TmpRsp);
351 if (rcStrict == VINF_SUCCESS)
352 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebp, &TmpRsp);
353 if (rcStrict == VINF_SUCCESS)
354 {
355 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
356 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebx, &TmpRsp);
357 }
358 if (rcStrict == VINF_SUCCESS)
359 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edx, &TmpRsp);
360 if (rcStrict == VINF_SUCCESS)
361 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ecx, &TmpRsp);
362 if (rcStrict == VINF_SUCCESS)
363 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->eax, &TmpRsp);
364 if (rcStrict == VINF_SUCCESS)
365 {
366#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
367 pCtx->rdi &= UINT32_MAX;
368 pCtx->rsi &= UINT32_MAX;
369 pCtx->rbp &= UINT32_MAX;
370 pCtx->rbx &= UINT32_MAX;
371 pCtx->rdx &= UINT32_MAX;
372 pCtx->rcx &= UINT32_MAX;
373 pCtx->rax &= UINT32_MAX;
374#endif
375 pCtx->rsp = TmpRsp.u;
376 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
377 }
378 }
379 else
380 {
381 uint32_t const *pa32Mem;
382 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
383 if (rcStrict == VINF_SUCCESS)
384 {
385 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
386 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
387 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
388 /* skip esp */
389 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
390 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
391 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
392 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
393 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
394 if (rcStrict == VINF_SUCCESS)
395 {
396 iemRegAddToRsp(pVCpu, pCtx, 32);
397 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
398 }
399 }
400 }
401 return rcStrict;
402}
403
404
405/**
406 * Implements a 16-bit pusha.
407 */
408IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
409{
410 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
411 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
412 RTGCPTR GCPtrBottom = GCPtrTop - 15;
413 VBOXSTRICTRC rcStrict;
414
415 /*
416 * The docs are a bit hard to comprehend here, but it looks like we wrap
417 * around in real mode as long as none of the individual "pusha" crosses the
418 * end of the stack segment. In protected mode we check the whole access
419 * in one go. For efficiency, only do the word-by-word thing if we're in
420 * danger of wrapping around.
421 */
422 /** @todo do pusha boundary / wrap-around checks. */
423 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
424 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
425 {
426 /* word-by-word */
427 RTUINT64U TmpRsp;
428 TmpRsp.u = pCtx->rsp;
429 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->ax, &TmpRsp);
430 if (rcStrict == VINF_SUCCESS)
431 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->cx, &TmpRsp);
432 if (rcStrict == VINF_SUCCESS)
433 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->dx, &TmpRsp);
434 if (rcStrict == VINF_SUCCESS)
435 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bx, &TmpRsp);
436 if (rcStrict == VINF_SUCCESS)
437 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->sp, &TmpRsp);
438 if (rcStrict == VINF_SUCCESS)
439 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bp, &TmpRsp);
440 if (rcStrict == VINF_SUCCESS)
441 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->si, &TmpRsp);
442 if (rcStrict == VINF_SUCCESS)
443 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->di, &TmpRsp);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 pCtx->rsp = TmpRsp.u;
447 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
448 }
449 }
450 else
451 {
452 GCPtrBottom--;
453 uint16_t *pa16Mem = NULL;
454 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
455 if (rcStrict == VINF_SUCCESS)
456 {
457 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
458 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
459 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
460 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
461 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
462 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
463 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
464 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
465 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
466 if (rcStrict == VINF_SUCCESS)
467 {
468 iemRegSubFromRsp(pVCpu, pCtx, 16);
469 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
470 }
471 }
472 }
473 return rcStrict;
474}
475
476
477/**
478 * Implements a 32-bit pusha.
479 */
480IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
481{
482 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
483 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
484 RTGCPTR GCPtrBottom = GCPtrTop - 31;
485 VBOXSTRICTRC rcStrict;
486
487 /*
488 * The docs are a bit hard to comprehend here, but it looks like we wrap
489 * around in real mode as long as none of the individual "pusha" crosses the
490 * end of the stack segment. In protected mode we check the whole access
491 * in one go. For efficiency, only do the word-by-word thing if we're in
492 * danger of wrapping around.
493 */
494 /** @todo do pusha boundary / wrap-around checks. */
495 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
496 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
497 {
498 /* word-by-word */
499 RTUINT64U TmpRsp;
500 TmpRsp.u = pCtx->rsp;
501 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->eax, &TmpRsp);
502 if (rcStrict == VINF_SUCCESS)
503 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ecx, &TmpRsp);
504 if (rcStrict == VINF_SUCCESS)
505 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edx, &TmpRsp);
506 if (rcStrict == VINF_SUCCESS)
507 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebx, &TmpRsp);
508 if (rcStrict == VINF_SUCCESS)
509 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esp, &TmpRsp);
510 if (rcStrict == VINF_SUCCESS)
511 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebp, &TmpRsp);
512 if (rcStrict == VINF_SUCCESS)
513 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esi, &TmpRsp);
514 if (rcStrict == VINF_SUCCESS)
515 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edi, &TmpRsp);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 pCtx->rsp = TmpRsp.u;
519 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
520 }
521 }
522 else
523 {
524 GCPtrBottom--;
525 uint32_t *pa32Mem;
526 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
527 if (rcStrict == VINF_SUCCESS)
528 {
529 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
530 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
531 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
532 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
533 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
534 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
535 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
536 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
537 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
538 if (rcStrict == VINF_SUCCESS)
539 {
540 iemRegSubFromRsp(pVCpu, pCtx, 32);
541 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
542 }
543 }
544 }
545 return rcStrict;
546}
547
548
549/**
550 * Implements pushf.
551 *
552 *
553 * @param enmEffOpSize The effective operand size.
554 */
555IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
556{
557 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
558 VBOXSTRICTRC rcStrict;
559
560 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
561 {
562 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
563 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
564 }
565
566 /*
567 * If we're in V8086 mode some care is required (which is why we're
568 * doing this in a C implementation).
569 */
570 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
571 if ( (fEfl & X86_EFL_VM)
572 && X86_EFL_GET_IOPL(fEfl) != 3 )
573 {
574 Assert(pCtx->cr0 & X86_CR0_PE);
575 if ( enmEffOpSize != IEMMODE_16BIT
576 || !(pCtx->cr4 & X86_CR4_VME))
577 return iemRaiseGeneralProtectionFault0(pVCpu);
578 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
579 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
580 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
581 }
582 else
583 {
584
585 /*
586 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
587 */
588 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
589
590 switch (enmEffOpSize)
591 {
592 case IEMMODE_16BIT:
593 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
594 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
595 fEfl |= UINT16_C(0xf000);
596 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
597 break;
598 case IEMMODE_32BIT:
599 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
600 break;
601 case IEMMODE_64BIT:
602 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
603 break;
604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
605 }
606 }
607 if (rcStrict != VINF_SUCCESS)
608 return rcStrict;
609
610 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
611 return VINF_SUCCESS;
612}
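
/*
 * In the CR4.VME path above the pushed 16-bit image gets IF replaced by the
 * virtual interrupt flag: VIF lives in EFLAGS bit 19 and IF in bit 9, hence
 * the ">> (19 - 9)" shift.  A standalone sketch of that transformation (the
 * name is illustrative only):
 */
#if 0 /* illustrative sketch */
static uint16_t iemSketchPushfVmeImage(uint32_t fEfl)
{
    fEfl &= ~X86_EFL_IF;                        /* drop the real IF */
    fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);   /* ... and substitute VIF in its place */
    return (uint16_t)fEfl;                      /* only the low word is pushed */
}
#endif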
613
614
615/**
616 * Implements popf.
617 *
618 * @param enmEffOpSize The effective operand size.
619 */
620IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
621{
622 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
623 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu, pCtx);
624 VBOXSTRICTRC rcStrict;
625 uint32_t fEflNew;
626
627 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
628 {
629 Log2(("popf: Guest intercept -> #VMEXIT\n"));
630 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
631 }
632
633 /*
634 * V8086 is special as usual.
635 */
636 if (fEflOld & X86_EFL_VM)
637 {
638 /*
639 * Almost anything goes if IOPL is 3.
640 */
641 if (X86_EFL_GET_IOPL(fEflOld) == 3)
642 {
643 switch (enmEffOpSize)
644 {
645 case IEMMODE_16BIT:
646 {
647 uint16_t u16Value;
648 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
649 if (rcStrict != VINF_SUCCESS)
650 return rcStrict;
651 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
652 break;
653 }
654 case IEMMODE_32BIT:
655 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
656 if (rcStrict != VINF_SUCCESS)
657 return rcStrict;
658 break;
659 IEM_NOT_REACHED_DEFAULT_CASE_RET();
660 }
661
662 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
663 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
664 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
665 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
666 }
667 /*
668 * Interrupt flag virtualization with CR4.VME=1.
669 */
670 else if ( enmEffOpSize == IEMMODE_16BIT
671 && (pCtx->cr4 & X86_CR4_VME) )
672 {
673 uint16_t u16Value;
674 RTUINT64U TmpRsp;
675 TmpRsp.u = pCtx->rsp;
676 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
677 if (rcStrict != VINF_SUCCESS)
678 return rcStrict;
679
680 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
681 * or before? */
682 if ( ( (u16Value & X86_EFL_IF)
683 && (fEflOld & X86_EFL_VIP))
684 || (u16Value & X86_EFL_TF) )
685 return iemRaiseGeneralProtectionFault0(pVCpu);
686
687 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
688 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
689 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
690 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
691
692 pCtx->rsp = TmpRsp.u;
693 }
694 else
695 return iemRaiseGeneralProtectionFault0(pVCpu);
696
697 }
698 /*
699 * Not in V8086 mode.
700 */
701 else
702 {
703 /* Pop the flags. */
704 switch (enmEffOpSize)
705 {
706 case IEMMODE_16BIT:
707 {
708 uint16_t u16Value;
709 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
710 if (rcStrict != VINF_SUCCESS)
711 return rcStrict;
712 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
713
714 /*
715 * Ancient CPU adjustments:
716 * - 8086, 80186, V20/30:
717 * Fixed bits 15:12 are not kept correctly internally, mostly for
718 * practical reasons (masking below). We add them when pushing flags.
719 * - 80286:
720 * The NT and IOPL flags cannot be popped from real mode and are
721 * therefore always zero (since a 286 can never exit from PM and
722 * their initial value is zero). This changed on a 386 and can
723 * therefore be used to detect 286 or 386 CPU in real mode.
724 */
725 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
726 && !(pCtx->cr0 & X86_CR0_PE) )
727 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
728 break;
729 }
730 case IEMMODE_32BIT:
731 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
732 if (rcStrict != VINF_SUCCESS)
733 return rcStrict;
734 break;
735 case IEMMODE_64BIT:
736 {
737 uint64_t u64Value;
738 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
739 if (rcStrict != VINF_SUCCESS)
740 return rcStrict;
741 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
742 break;
743 }
744 IEM_NOT_REACHED_DEFAULT_CASE_RET();
745 }
746
747 /* Merge them with the current flags. */
748 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
749 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
750 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
751 || pVCpu->iem.s.uCpl == 0)
752 {
753 fEflNew &= fPopfBits;
754 fEflNew |= ~fPopfBits & fEflOld;
755 }
756 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
757 {
758 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
759 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
760 }
761 else
762 {
763 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
764 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
765 }
766 }
767
768 /*
769 * Commit the flags.
770 */
771 Assert(fEflNew & RT_BIT_32(1));
772 IEMMISC_SET_EFL(pVCpu, pCtx, fEflNew);
773 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
774
775 return VINF_SUCCESS;
776}
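
/*
 * The flag merging above repeatedly uses the same pattern: keep the popped
 * value for the bits in a writable mask and the old value for everything
 * else.  A standalone sketch of that helper (illustrative only; the real code
 * additionally narrows the mask by IOPL and IF depending on CPL and IOPL):
 */
#if 0 /* illustrative sketch */
static uint32_t iemSketchMergePoppedEFlags(uint32_t fEflOld, uint32_t fEflPopped, uint32_t fWritableMask)
{
    return (fEflPopped &  fWritableMask)
         | (fEflOld    & ~fWritableMask);
}
#endif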
777
778
779/**
780 * Implements a 16-bit indirect call.
781 *
782 * @param uNewPC The new program counter (RIP) value (loaded from the
783 * operand).
785 */
786IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
787{
788 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
789 uint16_t uOldPC = pCtx->ip + cbInstr;
790 if (uNewPC > pCtx->cs.u32Limit)
791 return iemRaiseGeneralProtectionFault0(pVCpu);
792
793 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
794 if (rcStrict != VINF_SUCCESS)
795 return rcStrict;
796
797 pCtx->rip = uNewPC;
798 pCtx->eflags.Bits.u1RF = 0;
799
800#ifndef IEM_WITH_CODE_TLB
801 /* Flush the prefetch buffer. */
802 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
803#endif
804 return VINF_SUCCESS;
805}
806
807
808/**
809 * Implements a 16-bit relative call.
810 *
811 * @param offDisp The displacement offset.
812 */
813IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
814{
815 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
816 uint16_t uOldPC = pCtx->ip + cbInstr;
817 uint16_t uNewPC = uOldPC + offDisp;
818 if (uNewPC > pCtx->cs.u32Limit)
819 return iemRaiseGeneralProtectionFault0(pVCpu);
820
821 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
822 if (rcStrict != VINF_SUCCESS)
823 return rcStrict;
824
825 pCtx->rip = uNewPC;
826 pCtx->eflags.Bits.u1RF = 0;
827
828#ifndef IEM_WITH_CODE_TLB
829 /* Flush the prefetch buffer. */
830 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
831#endif
832 return VINF_SUCCESS;
833}
834
835
836/**
837 * Implements a 32-bit indirect call.
838 *
839 * @param uNewPC The new program counter (RIP) value (loaded from the
840 * operand).
842 */
843IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
844{
845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
846 uint32_t uOldPC = pCtx->eip + cbInstr;
847 if (uNewPC > pCtx->cs.u32Limit)
848 return iemRaiseGeneralProtectionFault0(pVCpu);
849
850 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
851 if (rcStrict != VINF_SUCCESS)
852 return rcStrict;
853
854#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
855 /*
856 * CSAM hook for recording interesting indirect calls.
857 */
858 if ( !pCtx->eflags.Bits.u1IF
859 && (pCtx->cr0 & X86_CR0_PG)
860 && !CSAMIsEnabled(pVCpu->CTX_SUFF(pVM))
861 && pVCpu->iem.s.uCpl == 0)
862 {
863 EMSTATE enmState = EMGetState(pVCpu);
864 if ( enmState == EMSTATE_IEM_THEN_REM
865 || enmState == EMSTATE_IEM
866 || enmState == EMSTATE_REM)
867 CSAMR3RecordCallAddress(pVCpu->CTX_SUFF(pVM), pCtx->eip);
868 }
869#endif
870
871 pCtx->rip = uNewPC;
872 pCtx->eflags.Bits.u1RF = 0;
873
874#ifndef IEM_WITH_CODE_TLB
875 /* Flush the prefetch buffer. */
876 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
877#endif
878 return VINF_SUCCESS;
879}
880
881
882/**
883 * Implements a 32-bit relative call.
884 *
885 * @param offDisp The displacement offset.
886 */
887IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
888{
889 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
890 uint32_t uOldPC = pCtx->eip + cbInstr;
891 uint32_t uNewPC = uOldPC + offDisp;
892 if (uNewPC > pCtx->cs.u32Limit)
893 return iemRaiseGeneralProtectionFault0(pVCpu);
894
895 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
896 if (rcStrict != VINF_SUCCESS)
897 return rcStrict;
898
899 pCtx->rip = uNewPC;
900 pCtx->eflags.Bits.u1RF = 0;
901
902#ifndef IEM_WITH_CODE_TLB
903 /* Flush the prefetch buffer. */
904 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
905#endif
906 return VINF_SUCCESS;
907}
908
909
910/**
911 * Implements a 64-bit indirect call.
912 *
913 * @param uNewPC The new program counter (RIP) value (loaded from the
914 * operand).
916 */
917IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
918{
919 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
920 uint64_t uOldPC = pCtx->rip + cbInstr;
921 if (!IEM_IS_CANONICAL(uNewPC))
922 return iemRaiseGeneralProtectionFault0(pVCpu);
923
924 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
925 if (rcStrict != VINF_SUCCESS)
926 return rcStrict;
927
928 pCtx->rip = uNewPC;
929 pCtx->eflags.Bits.u1RF = 0;
930
931#ifndef IEM_WITH_CODE_TLB
932 /* Flush the prefetch buffer. */
933 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
934#endif
935 return VINF_SUCCESS;
936}
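
/*
 * The 64-bit call paths above only validate the target with IEM_IS_CANONICAL,
 * i.e. bits 63:48 must all equal bit 47 for the common 48-bit linear address
 * width.  A standalone sketch of that test using the usual add-and-compare
 * trick (assuming 48-bit linear addresses; the name is illustrative only):
 */
#if 0 /* illustrative sketch */
static bool iemSketchIsCanonical48(uint64_t uAddr)
{
    /* Both canonical halves land below 2^48 after adding 2^47; anything else wraps past it. */
    return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
}
#endif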
937
938
939/**
940 * Implements a 64-bit relative call.
941 *
942 * @param offDisp The displacement offset.
943 */
944IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
945{
946 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
947 uint64_t uOldPC = pCtx->rip + cbInstr;
948 uint64_t uNewPC = uOldPC + offDisp;
949 if (!IEM_IS_CANONICAL(uNewPC))
950 return iemRaiseNotCanonical(pVCpu);
951
952 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
953 if (rcStrict != VINF_SUCCESS)
954 return rcStrict;
955
956 pCtx->rip = uNewPC;
957 pCtx->eflags.Bits.u1RF = 0;
958
959#ifndef IEM_WITH_CODE_TLB
960 /* Flush the prefetch buffer. */
961 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
962#endif
963
964 return VINF_SUCCESS;
965}
966
967
968/**
969 * Implements far jumps and calls thru task segments (TSS).
970 *
971 * @param uSel The selector.
972 * @param enmBranch The kind of branching we're performing.
973 * @param enmEffOpSize The effective operand size.
974 * @param pDesc The descriptor corresponding to @a uSel. The type is
975 * task gate.
976 */
977IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
978{
979#ifndef IEM_IMPLEMENTS_TASKSWITCH
980 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
981#else
982 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
983 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
984 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
985 RT_NOREF_PV(enmEffOpSize);
986
987 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
988 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
989 {
990 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
991 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
992 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
993 }
994
995 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
996 * far calls (see iemCImpl_callf). Most likely in both cases it should be
997 * checked here, need testcases. */
998 if (!pDesc->Legacy.Gen.u1Present)
999 {
1000 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1001 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1002 }
1003
1004 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1005 uint32_t uNextEip = pCtx->eip + cbInstr;
1006 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1007 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1008#endif
1009}
1010
1011
1012/**
1013 * Implements far jumps and calls thru task gates.
1014 *
1015 * @param uSel The selector.
1016 * @param enmBranch The kind of branching we're performing.
1017 * @param enmEffOpSize The effective operand size.
1018 * @param pDesc The descriptor corresponding to @a uSel. The type is
1019 * task gate.
1020 */
1021IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1022{
1023#ifndef IEM_IMPLEMENTS_TASKSWITCH
1024 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1025#else
1026 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1027 RT_NOREF_PV(enmEffOpSize);
1028
1029 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1030 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1031 {
1032 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1033 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1034 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1035 }
1036
1037 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1038 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1039 * checked here, need testcases. */
1040 if (!pDesc->Legacy.Gen.u1Present)
1041 {
1042 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1043 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1044 }
1045
1046 /*
1047 * Fetch the new TSS descriptor from the GDT.
1048 */
1049 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1050 if (uSelTss & X86_SEL_LDT)
1051 {
1052 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1053 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1054 }
1055
1056 IEMSELDESC TssDesc;
1057 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1058 if (rcStrict != VINF_SUCCESS)
1059 return rcStrict;
1060
1061 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1062 {
1063 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1064 TssDesc.Legacy.Gate.u4Type));
1065 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1066 }
1067
1068 if (!TssDesc.Legacy.Gate.u1Present)
1069 {
1070 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1071 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1072 }
1073
1074 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1075 uint32_t uNextEip = pCtx->eip + cbInstr;
1076 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1077 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1078#endif
1079}
1080
1081
1082/**
1083 * Implements far jumps and calls thru call gates.
1084 *
1085 * @param uSel The selector.
1086 * @param enmBranch The kind of branching we're performing.
1087 * @param enmEffOpSize The effective operand size.
1088 * @param pDesc The descriptor corresponding to @a uSel. The type is
1089 * call gate.
1090 */
1091IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1092{
1093#define IEM_IMPLEMENTS_CALLGATE
1094#ifndef IEM_IMPLEMENTS_CALLGATE
1095 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1096#else
1097 RT_NOREF_PV(enmEffOpSize);
1098
1099 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1100 * inter-privilege calls and are much more complex.
1101 *
1102 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1103 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1104 * must be 16-bit or 32-bit.
1105 */
1106 /** @todo: effective operand size is probably irrelevant here, only the
1107 * call gate bitness matters??
1108 */
1109 VBOXSTRICTRC rcStrict;
1110 RTPTRUNION uPtrRet;
1111 uint64_t uNewRsp;
1112 uint64_t uNewRip;
1113 uint64_t u64Base;
1114 uint32_t cbLimit;
1115 RTSEL uNewCS;
1116 IEMSELDESC DescCS;
1117
1118 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1119 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1120 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1121 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1122
1123 /* Determine the new instruction pointer from the gate descriptor. */
1124 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1125 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1126 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1127
1128 /* Perform DPL checks on the gate descriptor. */
1129 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1130 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1131 {
1132 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1133 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1134 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1135 }
1136
1137 /** @todo does this catch NULL selectors, too? */
1138 if (!pDesc->Legacy.Gen.u1Present)
1139 {
1140 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1141 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1142 }
1143
1144 /*
1145 * Fetch the target CS descriptor from the GDT or LDT.
1146 */
1147 uNewCS = pDesc->Legacy.Gate.u16Sel;
1148 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1149 if (rcStrict != VINF_SUCCESS)
1150 return rcStrict;
1151
1152 /* Target CS must be a code selector. */
1153 if ( !DescCS.Legacy.Gen.u1DescType
1154 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1155 {
1156 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1157 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1158 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1159 }
1160
1161 /* Privilege checks on target CS. */
1162 if (enmBranch == IEMBRANCH_JUMP)
1163 {
1164 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1165 {
1166 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1167 {
1168 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1169 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1170 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1171 }
1172 }
1173 else
1174 {
1175 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1176 {
1177 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1178 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1179 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1180 }
1181 }
1182 }
1183 else
1184 {
1185 Assert(enmBranch == IEMBRANCH_CALL);
1186 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1187 {
1188 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1189 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1190 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1191 }
1192 }
1193
1194 /* Additional long mode checks. */
1195 if (IEM_IS_LONG_MODE(pVCpu))
1196 {
1197 if (!DescCS.Legacy.Gen.u1Long)
1198 {
1199 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1200 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1201 }
1202
1203 /* L vs D. */
1204 if ( DescCS.Legacy.Gen.u1Long
1205 && DescCS.Legacy.Gen.u1DefBig)
1206 {
1207 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1208 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1209 }
1210 }
1211
1212 if (!DescCS.Legacy.Gate.u1Present)
1213 {
1214 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1215 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1216 }
1217
1218 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1219
1220 if (enmBranch == IEMBRANCH_JUMP)
1221 {
1222 /** @todo: This is very similar to regular far jumps; merge! */
1223 /* Jumps are fairly simple... */
1224
1225 /* Chop the high bits off if 16-bit gate (Intel says so). */
1226 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1227 uNewRip = (uint16_t)uNewRip;
1228
1229 /* Limit check for non-long segments. */
1230 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1231 if (DescCS.Legacy.Gen.u1Long)
1232 u64Base = 0;
1233 else
1234 {
1235 if (uNewRip > cbLimit)
1236 {
1237 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1238 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1239 }
1240 u64Base = X86DESC_BASE(&DescCS.Legacy);
1241 }
1242
1243 /* Canonical address check. */
1244 if (!IEM_IS_CANONICAL(uNewRip))
1245 {
1246 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1247 return iemRaiseNotCanonical(pVCpu);
1248 }
1249
1250 /*
1251 * Ok, everything checked out fine. Now set the accessed bit before
1252 * committing the result into CS, CSHID and RIP.
1253 */
1254 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1255 {
1256 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1257 if (rcStrict != VINF_SUCCESS)
1258 return rcStrict;
1259 /** @todo check what VT-x and AMD-V does. */
1260 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1261 }
1262
1263 /* commit */
1264 pCtx->rip = uNewRip;
1265 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1266 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1267 pCtx->cs.ValidSel = pCtx->cs.Sel;
1268 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1269 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1270 pCtx->cs.u32Limit = cbLimit;
1271 pCtx->cs.u64Base = u64Base;
1272 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1273 }
1274 else
1275 {
1276 Assert(enmBranch == IEMBRANCH_CALL);
1277 /* Calls are much more complicated. */
1278
1279 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1280 {
1281 uint16_t offNewStack; /* Offset of new stack in TSS. */
1282 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1283 uint8_t uNewCSDpl;
1284 uint8_t cbWords;
1285 RTSEL uNewSS;
1286 RTSEL uOldSS;
1287 uint64_t uOldRsp;
1288 IEMSELDESC DescSS;
1289 RTPTRUNION uPtrTSS;
1290 RTGCPTR GCPtrTSS;
1291 RTPTRUNION uPtrParmWds;
1292 RTGCPTR GCPtrParmWds;
1293
1294 /* More privilege. This is the fun part. */
1295 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1296
1297 /*
1298 * Determine new SS:rSP from the TSS.
1299 */
1300 Assert(!pCtx->tr.Attr.n.u1DescType);
1301
1302 /* Figure out where the new stack pointer is stored in the TSS. */
1303 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1304 if (!IEM_IS_LONG_MODE(pVCpu))
1305 {
1306 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1307 {
1308 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1309 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1310 }
1311 else
1312 {
1313 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1314 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1315 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1316 }
1317 }
1318 else
1319 {
1320 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1321 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1322 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1323 }
1324
1325 /* Check against TSS limit. */
1326 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1327 {
1328 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1329 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pCtx->tr.Sel);
1330 }
1331
1332 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1333 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1334 if (rcStrict != VINF_SUCCESS)
1335 {
1336 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1337 return rcStrict;
1338 }
1339
1340 if (!IEM_IS_LONG_MODE(pVCpu))
1341 {
1342 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1343 {
1344 uNewRsp = uPtrTSS.pu32[0];
1345 uNewSS = uPtrTSS.pu16[2];
1346 }
1347 else
1348 {
1349 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1350 uNewRsp = uPtrTSS.pu16[0];
1351 uNewSS = uPtrTSS.pu16[1];
1352 }
1353 }
1354 else
1355 {
1356 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1357 /* SS will be a NULL selector, but that's valid. */
1358 uNewRsp = uPtrTSS.pu64[0];
1359 uNewSS = uNewCSDpl;
1360 }
1361
1362 /* Done with the TSS now. */
1363 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1364 if (rcStrict != VINF_SUCCESS)
1365 {
1366 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1367 return rcStrict;
1368 }
1369
1370 /* Only used outside of long mode. */
1371 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1372
1373 /* If EFER.LMA is 0, there's extra work to do. */
1374 if (!IEM_IS_LONG_MODE(pVCpu))
1375 {
1376 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1377 {
1378 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1379 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1380 }
1381
1382 /* Grab the new SS descriptor. */
1383 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1384 if (rcStrict != VINF_SUCCESS)
1385 return rcStrict;
1386
1387 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1388 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1389 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1390 {
1391 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1392 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1393 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1394 }
1395
1396 /* Ensure new SS is a writable data segment. */
1397 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1398 {
1399 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1400 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1401 }
1402
1403 if (!DescSS.Legacy.Gen.u1Present)
1404 {
1405 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1406 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1407 }
1408 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1409 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1410 else
1411 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1412 }
1413 else
1414 {
1415 /* Just grab the new (NULL) SS descriptor. */
1416 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1417 * like we do... */
1418 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1419 if (rcStrict != VINF_SUCCESS)
1420 return rcStrict;
1421
1422 cbNewStack = sizeof(uint64_t) * 4;
1423 }
1424
1425 /** @todo: According to Intel, new stack is checked for enough space first,
1426 * then switched. According to AMD, the stack is switched first and
1427 * then pushes might fault!
1428 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1429 * incoming stack #PF happens before actual stack switch. AMD is
1430 * either lying or implicitly assumes that new state is committed
1431 * only if and when an instruction doesn't fault.
1432 */
1433
1434 /** @todo: According to AMD, CS is loaded first, then SS.
1435 * According to Intel, it's the other way around!?
1436 */
1437
1438 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1439
1440 /* Set the accessed bit before committing new SS. */
1441 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1442 {
1443 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1444 if (rcStrict != VINF_SUCCESS)
1445 return rcStrict;
1446 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1447 }
1448
1449 /* Remember the old SS:rSP and their linear address. */
1450 uOldSS = pCtx->ss.Sel;
1451 uOldRsp = pCtx->ss.Attr.n.u1DefBig ? pCtx->rsp : pCtx->sp;
1452
1453 GCPtrParmWds = pCtx->ss.u64Base + uOldRsp;
1454
1455 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1456 or #PF, the former is not implemented in this workaround. */
1457 /** @todo Properly fix call gate target stack exceptions. */
1458 /** @todo testcase: Cover callgates with partially or fully inaccessible
1459 * target stacks. */
1460 void *pvNewFrame;
1461 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1462 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW);
1463 if (rcStrict != VINF_SUCCESS)
1464 {
1465 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1466 return rcStrict;
1467 }
1468 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1469 if (rcStrict != VINF_SUCCESS)
1470 {
1471 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1472 return rcStrict;
1473 }
1474
1475 /* Commit new SS:rSP. */
1476 pCtx->ss.Sel = uNewSS;
1477 pCtx->ss.ValidSel = uNewSS;
1478 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1479 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1480 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1481 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1482 pCtx->rsp = uNewRsp;
1483 pVCpu->iem.s.uCpl = uNewCSDpl;
1484 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1485 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1486
1487 /* At this point the stack access must not fail because new state was already committed. */
1488 /** @todo this can still fail due to SS.LIMIT not being checked. */
1489 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1490 &uPtrRet.pv, &uNewRsp);
1491 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1492 VERR_INTERNAL_ERROR_5);
1493
1494 if (!IEM_IS_LONG_MODE(pVCpu))
1495 {
1496 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1497 {
1498 /* Push the old CS:rIP. */
1499 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1500 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1501
1502 if (cbWords)
1503 {
1504 /* Map the relevant chunk of the old stack. */
1505 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1506 if (rcStrict != VINF_SUCCESS)
1507 {
1508 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1509 return rcStrict;
1510 }
1511
1512 /* Copy the parameter (d)words. */
1513 for (int i = 0; i < cbWords; ++i)
1514 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1515
1516 /* Unmap the old stack. */
1517 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1518 if (rcStrict != VINF_SUCCESS)
1519 {
1520 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1521 return rcStrict;
1522 }
1523 }
1524
1525 /* Push the old SS:rSP. */
1526 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1527 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1528 }
1529 else
1530 {
1531 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1532
1533 /* Push the old CS:rIP. */
1534 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1535 uPtrRet.pu16[1] = pCtx->cs.Sel;
1536
1537 if (cbWords)
1538 {
1539 /* Map the relevant chunk of the old stack. */
1540 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1541 if (rcStrict != VINF_SUCCESS)
1542 {
1543 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1544 return rcStrict;
1545 }
1546
1547 /* Copy the parameter words. */
1548 for (int i = 0; i < cbWords; ++i)
1549 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1550
1551 /* Unmap the old stack. */
1552 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1553 if (rcStrict != VINF_SUCCESS)
1554 {
1555 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1556 return rcStrict;
1557 }
1558 }
1559
1560 /* Push the old SS:rSP. */
1561 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1562 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1563 }
1564 }
1565 else
1566 {
1567 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1568
1569 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1570 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1571 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1572 uPtrRet.pu64[2] = uOldRsp;
1573 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1574 }
1575
1576 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1577 if (rcStrict != VINF_SUCCESS)
1578 {
1579 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1580 return rcStrict;
1581 }
1582
1583 /* Chop the high bits off if 16-bit gate (Intel says so). */
1584 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1585 uNewRip = (uint16_t)uNewRip;
1586
1587 /* Limit / canonical check. */
1588 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1589 if (!IEM_IS_LONG_MODE(pVCpu))
1590 {
1591 if (uNewRip > cbLimit)
1592 {
1593 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1594 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1595 }
1596 u64Base = X86DESC_BASE(&DescCS.Legacy);
1597 }
1598 else
1599 {
1600 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1601 if (!IEM_IS_CANONICAL(uNewRip))
1602 {
1603 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1604 return iemRaiseNotCanonical(pVCpu);
1605 }
1606 u64Base = 0;
1607 }
1608
1609 /*
1610 * Now set the accessed bit before committing the result into CS,
1611 * CSHID and RIP (the return frame was already written and committed above).
1613 */
1614 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1615 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1616 {
1617 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1618 if (rcStrict != VINF_SUCCESS)
1619 return rcStrict;
1620 /** @todo check what VT-x and AMD-V does. */
1621 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1622 }
1623
1624 /* Commit new CS:rIP. */
1625 pCtx->rip = uNewRip;
1626 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1627 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1628 pCtx->cs.ValidSel = pCtx->cs.Sel;
1629 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1630 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1631 pCtx->cs.u32Limit = cbLimit;
1632 pCtx->cs.u64Base = u64Base;
1633 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1634 }
1635 else
1636 {
1637 /* Same privilege. */
1638 /** @todo: This is very similar to regular far calls; merge! */
1639
1640 /* Check stack first - may #SS(0). */
1641 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1642 * 16-bit code cause a two or four byte CS to be pushed? */
1643 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1644 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1645 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1646 &uPtrRet.pv, &uNewRsp);
1647 if (rcStrict != VINF_SUCCESS)
1648 return rcStrict;
1649
1650 /* Chop the high bits off if 16-bit gate (Intel says so). */
1651 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1652 uNewRip = (uint16_t)uNewRip;
1653
1654 /* Limit / canonical check. */
1655 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1656 if (!IEM_IS_LONG_MODE(pVCpu))
1657 {
1658 if (uNewRip > cbLimit)
1659 {
1660 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1661 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1662 }
1663 u64Base = X86DESC_BASE(&DescCS.Legacy);
1664 }
1665 else
1666 {
1667 if (!IEM_IS_CANONICAL(uNewRip))
1668 {
1669 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1670 return iemRaiseNotCanonical(pVCpu);
1671 }
1672 u64Base = 0;
1673 }
1674
1675 /*
1676 * Now set the accessed bit before
1677 * writing the return address to the stack and committing the result into
1678 * CS, CSHID and RIP.
1679 */
1680 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1681 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1682 {
1683 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1684 if (rcStrict != VINF_SUCCESS)
1685 return rcStrict;
1686 /** @todo check what VT-x and AMD-V does. */
1687 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1688 }
1689
1690 /* stack */
1691 if (!IEM_IS_LONG_MODE(pVCpu))
1692 {
1693 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1694 {
1695 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1696 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1697 }
1698 else
1699 {
1700 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1701 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1702 uPtrRet.pu16[1] = pCtx->cs.Sel;
1703 }
1704 }
1705 else
1706 {
1707 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1708 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1709 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1710 }
1711
1712 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1713 if (rcStrict != VINF_SUCCESS)
1714 return rcStrict;
1715
1716 /* commit */
1717 pCtx->rip = uNewRip;
1718 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1719 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1720 pCtx->cs.ValidSel = pCtx->cs.Sel;
1721 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1722 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1723 pCtx->cs.u32Limit = cbLimit;
1724 pCtx->cs.u64Base = u64Base;
1725 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1726 }
1727 }
1728 pCtx->eflags.Bits.u1RF = 0;
1729
1730 /* Flush the prefetch buffer. */
1731# ifdef IEM_WITH_CODE_TLB
1732 pVCpu->iem.s.pbInstrBuf = NULL;
1733# else
1734 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1735# endif
1736 return VINF_SUCCESS;
1737#endif
1738}
1739
1740
1741/**
1742 * Implements far jumps and calls thru system selectors.
1743 *
1744 * @param uSel The selector.
1745 * @param enmBranch The kind of branching we're performing.
1746 * @param enmEffOpSize The effective operand size.
1747 * @param pDesc The descriptor corresponding to @a uSel.
1748 */
1749IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1750{
1751 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1752 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1753
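         /* In long mode the only valid system target for a far jmp/call is the 64-bit call gate; all other system types yield #GP(sel). */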
1754 if (IEM_IS_LONG_MODE(pVCpu))
1755 switch (pDesc->Legacy.Gen.u4Type)
1756 {
1757 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1758 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1759
1760 default:
1761 case AMD64_SEL_TYPE_SYS_LDT:
1762 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1763 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1764 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1765 case AMD64_SEL_TYPE_SYS_INT_GATE:
1766 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1767 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1768 }
1769
1770 switch (pDesc->Legacy.Gen.u4Type)
1771 {
1772 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1773 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1774 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1775
1776 case X86_SEL_TYPE_SYS_TASK_GATE:
1777 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1778
1779 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1780 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1781 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1782
1783 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1784 Log(("branch %04x -> busy 286 TSS\n", uSel));
1785 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1786
1787 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1788 Log(("branch %04x -> busy 386 TSS\n", uSel));
1789 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1790
1791 default:
1792 case X86_SEL_TYPE_SYS_LDT:
1793 case X86_SEL_TYPE_SYS_286_INT_GATE:
1794 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1795 case X86_SEL_TYPE_SYS_386_INT_GATE:
1796 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1797 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1798 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1799 }
1800}
1801
1802
1803/**
1804 * Implements far jumps.
1805 *
1806 * @param uSel The selector.
1807 * @param offSeg The segment offset.
1808 * @param enmEffOpSize The effective operand size.
1809 */
1810IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1811{
1812 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1813 NOREF(cbInstr);
1814 Assert(offSeg <= UINT32_MAX);
1815
1816 /*
1817 * Real mode and V8086 mode are easy. The only snag seems to be that
1818 * CS.limit doesn't change and the limit check is done against the current
1819 * limit.
1820 */
1821 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
1822 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
1823 {
1824 if (offSeg > pCtx->cs.u32Limit)
1825 {
1826 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1827 return iemRaiseGeneralProtectionFault0(pVCpu);
1828 }
1829
1830 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1831 pCtx->rip = offSeg;
1832 else
1833 pCtx->rip = offSeg & UINT16_MAX;
1834 pCtx->cs.Sel = uSel;
1835 pCtx->cs.ValidSel = uSel;
1836 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1837 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1838 pCtx->eflags.Bits.u1RF = 0;
1839 return VINF_SUCCESS;
1840 }
1841
1842 /*
1843 * Protected mode. Need to parse the specified descriptor...
1844 */
1845 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1846 {
1847 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1848 return iemRaiseGeneralProtectionFault0(pVCpu);
1849 }
1850
1851 /* Fetch the descriptor. */
1852 IEMSELDESC Desc;
1853 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1854 if (rcStrict != VINF_SUCCESS)
1855 return rcStrict;
1856
1857 /* Is it there? */
1858 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1859 {
1860 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1861 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1862 }
1863
1864 /*
1865 * Deal with it according to its type. We do the standard code selectors
1866 * here and dispatch the system selectors to worker functions.
1867 */
1868 if (!Desc.Legacy.Gen.u1DescType)
1869 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1870
1871 /* Only code segments. */
1872 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1873 {
1874 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1875 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1876 }
1877
1878 /* L vs D. */
1879 if ( Desc.Legacy.Gen.u1Long
1880 && Desc.Legacy.Gen.u1DefBig
1881 && IEM_IS_LONG_MODE(pVCpu))
1882 {
1883 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1884 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1885 }
1886
1887     /* DPL/RPL/CPL check, where conforming segments make a difference. */
1888 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1889 {
1890 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1891 {
1892 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1893 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1894 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1895 }
1896 }
1897 else
1898 {
1899 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1900 {
1901 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1902 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1903 }
1904 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1905 {
1906 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1907 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1908 }
1909 }
1910
1911 /* Chop the high bits if 16-bit (Intel says so). */
1912 if (enmEffOpSize == IEMMODE_16BIT)
1913 offSeg &= UINT16_MAX;
1914
1915 /* Limit check. (Should alternatively check for non-canonical addresses
1916 here, but that is ruled out by offSeg being 32-bit, right?) */
1917 uint64_t u64Base;
1918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1919 if (Desc.Legacy.Gen.u1Long)
1920 u64Base = 0;
1921 else
1922 {
1923 if (offSeg > cbLimit)
1924 {
1925 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1926 /** @todo: Intel says this is #GP(0)! */
1927 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1928 }
1929 u64Base = X86DESC_BASE(&Desc.Legacy);
1930 }
1931
1932 /*
1933 * Ok, everything checked out fine. Now set the accessed bit before
1934 * committing the result into CS, CSHID and RIP.
1935 */
1936 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1937 {
1938 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1939 if (rcStrict != VINF_SUCCESS)
1940 return rcStrict;
1941 /** @todo check what VT-x and AMD-V does. */
1942 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1943 }
1944
1945 /* commit */
1946 pCtx->rip = offSeg;
1947 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1948 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1949 pCtx->cs.ValidSel = pCtx->cs.Sel;
1950 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1951 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1952 pCtx->cs.u32Limit = cbLimit;
1953 pCtx->cs.u64Base = u64Base;
1954 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1955 pCtx->eflags.Bits.u1RF = 0;
1956 /** @todo check if the hidden bits are loaded correctly for 64-bit
1957 * mode. */
1958
1959 /* Flush the prefetch buffer. */
1960#ifdef IEM_WITH_CODE_TLB
1961 pVCpu->iem.s.pbInstrBuf = NULL;
1962#else
1963 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1964#endif
1965
1966 return VINF_SUCCESS;
1967}
1968
1969
1970/**
1971 * Implements far calls.
1972 *
1973  * This is very similar to iemCImpl_FarJmp.
1974 *
1975 * @param uSel The selector.
1976 * @param offSeg The segment offset.
1977 * @param enmEffOpSize The operand size (in case we need it).
1978 */
1979IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1980{
1981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1982 VBOXSTRICTRC rcStrict;
1983 uint64_t uNewRsp;
1984 RTPTRUNION uPtrRet;
1985
1986 /*
1987 * Real mode and V8086 mode are easy. The only snag seems to be that
1988 * CS.limit doesn't change and the limit check is done against the current
1989 * limit.
1990 */
1991 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
1992 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
1993 {
1994 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1995
1996 /* Check stack first - may #SS(0). */
1997 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1998 &uPtrRet.pv, &uNewRsp);
1999 if (rcStrict != VINF_SUCCESS)
2000 return rcStrict;
2001
2002 /* Check the target address range. */
2003 if (offSeg > UINT32_MAX)
2004 return iemRaiseGeneralProtectionFault0(pVCpu);
2005
2006 /* Everything is fine, push the return address. */
2007 if (enmEffOpSize == IEMMODE_16BIT)
2008 {
2009 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2010 uPtrRet.pu16[1] = pCtx->cs.Sel;
2011 }
2012 else
2013 {
2014 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2015             uPtrRet.pu16[2] = pCtx->cs.Sel;
2016 }
2017 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2018 if (rcStrict != VINF_SUCCESS)
2019 return rcStrict;
2020
2021 /* Branch. */
2022 pCtx->rip = offSeg;
2023 pCtx->cs.Sel = uSel;
2024 pCtx->cs.ValidSel = uSel;
2025 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2026 pCtx->cs.u64Base = (uint32_t)uSel << 4;
2027 pCtx->eflags.Bits.u1RF = 0;
2028 return VINF_SUCCESS;
2029 }
2030
2031 /*
2032 * Protected mode. Need to parse the specified descriptor...
2033 */
2034 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2035 {
2036 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2037 return iemRaiseGeneralProtectionFault0(pVCpu);
2038 }
2039
2040 /* Fetch the descriptor. */
2041 IEMSELDESC Desc;
2042 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2043 if (rcStrict != VINF_SUCCESS)
2044 return rcStrict;
2045
2046 /*
2047 * Deal with it according to its type. We do the standard code selectors
2048 * here and dispatch the system selectors to worker functions.
2049 */
2050 if (!Desc.Legacy.Gen.u1DescType)
2051 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2052
2053 /* Only code segments. */
2054 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2055 {
2056 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2057 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2058 }
2059
2060 /* L vs D. */
2061 if ( Desc.Legacy.Gen.u1Long
2062 && Desc.Legacy.Gen.u1DefBig
2063 && IEM_IS_LONG_MODE(pVCpu))
2064 {
2065 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2066 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2067 }
2068
2069     /* DPL/RPL/CPL check, where conforming segments make a difference. */
2070 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2071 {
2072 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2073 {
2074 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2075 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2076 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2077 }
2078 }
2079 else
2080 {
2081 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2082 {
2083 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2084 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2085 }
2086 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2087 {
2088 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2089 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2090 }
2091 }
2092
2093 /* Is it there? */
2094 if (!Desc.Legacy.Gen.u1Present)
2095 {
2096 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2097 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2098 }
2099
2100 /* Check stack first - may #SS(0). */
2101 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2102 * 16-bit code cause a two or four byte CS to be pushed? */
2103 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2104 enmEffOpSize == IEMMODE_64BIT ? 8+8
2105 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2106 &uPtrRet.pv, &uNewRsp);
2107 if (rcStrict != VINF_SUCCESS)
2108 return rcStrict;
2109
2110 /* Chop the high bits if 16-bit (Intel says so). */
2111 if (enmEffOpSize == IEMMODE_16BIT)
2112 offSeg &= UINT16_MAX;
2113
2114 /* Limit / canonical check. */
2115 uint64_t u64Base;
2116 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2117 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2118 {
2119 if (!IEM_IS_CANONICAL(offSeg))
2120 {
2121 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2122 return iemRaiseNotCanonical(pVCpu);
2123 }
2124 u64Base = 0;
2125 }
2126 else
2127 {
2128 if (offSeg > cbLimit)
2129 {
2130 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2131 /** @todo: Intel says this is #GP(0)! */
2132 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2133 }
2134 u64Base = X86DESC_BASE(&Desc.Legacy);
2135 }
2136
2137 /*
2138 * Now set the accessed bit before
2139 * writing the return address to the stack and committing the result into
2140 * CS, CSHID and RIP.
2141 */
2142 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2143 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2144 {
2145 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 /** @todo check what VT-x and AMD-V does. */
2149 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2150 }
2151
2152 /* stack */
2153 if (enmEffOpSize == IEMMODE_16BIT)
2154 {
2155 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2156 uPtrRet.pu16[1] = pCtx->cs.Sel;
2157 }
2158 else if (enmEffOpSize == IEMMODE_32BIT)
2159 {
2160 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2161 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2162 }
2163 else
2164 {
2165 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2166 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2167 }
2168 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2169 if (rcStrict != VINF_SUCCESS)
2170 return rcStrict;
2171
2172 /* commit */
2173 pCtx->rip = offSeg;
2174 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2175 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
2176 pCtx->cs.ValidSel = pCtx->cs.Sel;
2177 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2178 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2179 pCtx->cs.u32Limit = cbLimit;
2180 pCtx->cs.u64Base = u64Base;
2181 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2182 pCtx->eflags.Bits.u1RF = 0;
2183 /** @todo check if the hidden bits are loaded correctly for 64-bit
2184 * mode. */
2185
2186 /* Flush the prefetch buffer. */
2187#ifdef IEM_WITH_CODE_TLB
2188 pVCpu->iem.s.pbInstrBuf = NULL;
2189#else
2190 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2191#endif
2192 return VINF_SUCCESS;
2193}
2194
2195
2196/**
2197 * Implements retf.
2198 *
2199 * @param enmEffOpSize The effective operand size.
2200  * @param   cbPop           The number of argument bytes to pop from the
2201  *                          stack.
2202 */
2203IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2204{
2205 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2206 VBOXSTRICTRC rcStrict;
2207 RTCPTRUNION uPtrFrame;
2208 uint64_t uNewRsp;
2209 uint64_t uNewRip;
2210 uint16_t uNewCs;
2211 NOREF(cbInstr);
2212
2213 /*
2214 * Read the stack values first.
2215 */
2216 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2217 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2218 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2219 if (rcStrict != VINF_SUCCESS)
2220 return rcStrict;
2221 if (enmEffOpSize == IEMMODE_16BIT)
2222 {
2223 uNewRip = uPtrFrame.pu16[0];
2224 uNewCs = uPtrFrame.pu16[1];
2225 }
2226 else if (enmEffOpSize == IEMMODE_32BIT)
2227 {
2228 uNewRip = uPtrFrame.pu32[0];
2229 uNewCs = uPtrFrame.pu16[2];
2230 }
2231 else
2232 {
2233 uNewRip = uPtrFrame.pu64[0];
2234 uNewCs = uPtrFrame.pu16[4];
2235 }
2236 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2237 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2238 { /* extremely likely */ }
2239 else
2240 return rcStrict;
2241
2242 /*
2243 * Real mode and V8086 mode are easy.
2244 */
2245 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
2246 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
2247 {
2248 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2249 /** @todo check how this is supposed to work if sp=0xfffe. */
2250
2251 /* Check the limit of the new EIP. */
2252 /** @todo Intel pseudo code only does the limit check for 16-bit
2253 * operands, AMD does not make any distinction. What is right? */
2254 if (uNewRip > pCtx->cs.u32Limit)
2255 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2256
2257 /* commit the operation. */
2258 pCtx->rsp = uNewRsp;
2259 pCtx->rip = uNewRip;
2260 pCtx->cs.Sel = uNewCs;
2261 pCtx->cs.ValidSel = uNewCs;
2262 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2263 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2264 pCtx->eflags.Bits.u1RF = 0;
2265 /** @todo do we load attribs and limit as well? */
2266 if (cbPop)
2267 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2268 return VINF_SUCCESS;
2269 }
2270
2271 /*
2272 * Protected mode is complicated, of course.
2273 */
2274 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2275 {
2276 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2277 return iemRaiseGeneralProtectionFault0(pVCpu);
2278 }
2279
2280 /* Fetch the descriptor. */
2281 IEMSELDESC DescCs;
2282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2283 if (rcStrict != VINF_SUCCESS)
2284 return rcStrict;
2285
2286 /* Can only return to a code selector. */
2287 if ( !DescCs.Legacy.Gen.u1DescType
2288 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2289 {
2290 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2291 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2292 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2293 }
2294
2295 /* L vs D. */
2296 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2297 && DescCs.Legacy.Gen.u1DefBig
2298 && IEM_IS_LONG_MODE(pVCpu))
2299 {
2300 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2301 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2302 }
2303
2304 /* DPL/RPL/CPL checks. */
2305 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2306 {
2307 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2308 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2309 }
2310
2311 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2312 {
2313 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2314 {
2315 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2316 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2317 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2318 }
2319 }
2320 else
2321 {
2322 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2323 {
2324 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2325 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2326 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2327 }
2328 }
2329
2330 /* Is it there? */
2331 if (!DescCs.Legacy.Gen.u1Present)
2332 {
2333 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2334 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2335 }
2336
2337 /*
2338 * Return to outer privilege? (We'll typically have entered via a call gate.)
2339 */
2340 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2341 {
2342 /* Read the outer stack pointer stored *after* the parameters. */
2343 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop + cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2344 if (rcStrict != VINF_SUCCESS)
2345 return rcStrict;
2346
2347 uPtrFrame.pu8 += cbPop; /* Skip the parameters. */
2348
2349 uint16_t uNewOuterSs;
2350 uint64_t uNewOuterRsp;
2351 if (enmEffOpSize == IEMMODE_16BIT)
2352 {
2353 uNewOuterRsp = uPtrFrame.pu16[0];
2354 uNewOuterSs = uPtrFrame.pu16[1];
2355 }
2356 else if (enmEffOpSize == IEMMODE_32BIT)
2357 {
2358 uNewOuterRsp = uPtrFrame.pu32[0];
2359 uNewOuterSs = uPtrFrame.pu16[2];
2360 }
2361 else
2362 {
2363 uNewOuterRsp = uPtrFrame.pu64[0];
2364 uNewOuterSs = uPtrFrame.pu16[4];
2365 }
2366 uPtrFrame.pu8 -= cbPop; /* Put uPtrFrame back the way it was. */
2367 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2368 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2369 { /* extremely likely */ }
2370 else
2371 return rcStrict;
2372
2373 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2374 and read the selector. */
2375 IEMSELDESC DescSs;
2376 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2377 {
2378 if ( !DescCs.Legacy.Gen.u1Long
2379 || (uNewOuterSs & X86_SEL_RPL) == 3)
2380 {
2381 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2382 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2383 return iemRaiseGeneralProtectionFault0(pVCpu);
2384 }
2385 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2386 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2387 }
2388 else
2389 {
2390 /* Fetch the descriptor for the new stack segment. */
2391 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2392 if (rcStrict != VINF_SUCCESS)
2393 return rcStrict;
2394 }
2395
2396 /* Check that RPL of stack and code selectors match. */
2397 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2398 {
2399 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2400 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2401 }
2402
2403 /* Must be a writable data segment. */
2404 if ( !DescSs.Legacy.Gen.u1DescType
2405 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2406 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2407 {
2408 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2409 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2410 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2411 }
2412
2413 /* L vs D. (Not mentioned by intel.) */
2414 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2415 && DescSs.Legacy.Gen.u1DefBig
2416 && IEM_IS_LONG_MODE(pVCpu))
2417 {
2418 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2419 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2420 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2421 }
2422
2423 /* DPL/RPL/CPL checks. */
2424 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2425 {
2426 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2427 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2428 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2429 }
2430
2431 /* Is it there? */
2432 if (!DescSs.Legacy.Gen.u1Present)
2433 {
2434 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2435             return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewOuterSs);
2436 }
2437
2438         /* Calc SS limit. */
2439 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2440
2441 /* Is RIP canonical or within CS.limit? */
2442 uint64_t u64Base;
2443 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2444
2445 /** @todo Testcase: Is this correct? */
2446 if ( DescCs.Legacy.Gen.u1Long
2447 && IEM_IS_LONG_MODE(pVCpu) )
2448 {
2449 if (!IEM_IS_CANONICAL(uNewRip))
2450 {
2451 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2452 return iemRaiseNotCanonical(pVCpu);
2453 }
2454 u64Base = 0;
2455 }
2456 else
2457 {
2458 if (uNewRip > cbLimitCs)
2459 {
2460 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2461 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2462 /** @todo: Intel says this is #GP(0)! */
2463 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2464 }
2465 u64Base = X86DESC_BASE(&DescCs.Legacy);
2466 }
2467
2468         /*
2469          * Now set the accessed bits of the CS and SS descriptors
2470          * before committing the result into
2471          * CS, SS, CSHID and RIP.
2472          */
2473 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2474 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2475 {
2476 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2477 if (rcStrict != VINF_SUCCESS)
2478 return rcStrict;
2479 /** @todo check what VT-x and AMD-V does. */
2480 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2481 }
2482 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2483 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2484 {
2485 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2486 if (rcStrict != VINF_SUCCESS)
2487 return rcStrict;
2488 /** @todo check what VT-x and AMD-V does. */
2489 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2490 }
2491
2492 /* commit */
2493 if (enmEffOpSize == IEMMODE_16BIT)
2494 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2495 else
2496 pCtx->rip = uNewRip;
2497 pCtx->cs.Sel = uNewCs;
2498 pCtx->cs.ValidSel = uNewCs;
2499 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2500 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2501 pCtx->cs.u32Limit = cbLimitCs;
2502 pCtx->cs.u64Base = u64Base;
2503 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2504 pCtx->ss.Sel = uNewOuterSs;
2505 pCtx->ss.ValidSel = uNewOuterSs;
2506 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2507 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2508 pCtx->ss.u32Limit = cbLimitSs;
2509 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2510 pCtx->ss.u64Base = 0;
2511 else
2512 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2513 if (!pCtx->ss.Attr.n.u1DefBig)
2514 pCtx->sp = (uint16_t)uNewOuterRsp;
2515 else
2516 pCtx->rsp = uNewOuterRsp;
2517
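             /* The new CPL is the RPL of the returned CS; the helpers below revalidate DS/ES/FS/GS and invalidate any that are not accessible at the outer privilege level. */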
2518 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2519 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2520 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2521 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2522 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2523
2524 /** @todo check if the hidden bits are loaded correctly for 64-bit
2525 * mode. */
2526
2527 if (cbPop)
2528 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2529 pCtx->eflags.Bits.u1RF = 0;
2530
2531 /* Done! */
2532 }
2533 /*
2534 * Return to the same privilege level
2535 */
2536 else
2537 {
2538 /* Limit / canonical check. */
2539 uint64_t u64Base;
2540 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2541
2542 /** @todo Testcase: Is this correct? */
2543 if ( DescCs.Legacy.Gen.u1Long
2544 && IEM_IS_LONG_MODE(pVCpu) )
2545 {
2546 if (!IEM_IS_CANONICAL(uNewRip))
2547 {
2548 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2549 return iemRaiseNotCanonical(pVCpu);
2550 }
2551 u64Base = 0;
2552 }
2553 else
2554 {
2555 if (uNewRip > cbLimitCs)
2556 {
2557 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2558 /** @todo: Intel says this is #GP(0)! */
2559 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2560 }
2561 u64Base = X86DESC_BASE(&DescCs.Legacy);
2562 }
2563
2564         /*
2565          * Now set the accessed bit before
2566          * committing the result into
2567          * CS, CSHID and RIP.
2568          */
2569 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2570 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2571 {
2572 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2573 if (rcStrict != VINF_SUCCESS)
2574 return rcStrict;
2575 /** @todo check what VT-x and AMD-V does. */
2576 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2577 }
2578
2579 /* commit */
2580 if (!pCtx->ss.Attr.n.u1DefBig)
2581 pCtx->sp = (uint16_t)uNewRsp;
2582 else
2583 pCtx->rsp = uNewRsp;
2584 if (enmEffOpSize == IEMMODE_16BIT)
2585 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2586 else
2587 pCtx->rip = uNewRip;
2588 pCtx->cs.Sel = uNewCs;
2589 pCtx->cs.ValidSel = uNewCs;
2590 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2591 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2592 pCtx->cs.u32Limit = cbLimitCs;
2593 pCtx->cs.u64Base = u64Base;
2594 /** @todo check if the hidden bits are loaded correctly for 64-bit
2595 * mode. */
2596 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2597 if (cbPop)
2598 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2599 pCtx->eflags.Bits.u1RF = 0;
2600 }
2601
2602 /* Flush the prefetch buffer. */
2603#ifdef IEM_WITH_CODE_TLB
2604 pVCpu->iem.s.pbInstrBuf = NULL;
2605#else
2606 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2607#endif
2608 return VINF_SUCCESS;
2609}
2610
2611
2612/**
2613 * Implements retn.
2614 *
2615 * We're doing this in C because of the \#GP that might be raised if the popped
2616 * program counter is out of bounds.
2617 *
2618 * @param enmEffOpSize The effective operand size.
2619  * @param   cbPop           The number of argument bytes to pop from the
2620  *                          stack.
2621 */
2622IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2623{
2624 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2625 NOREF(cbInstr);
2626
2627     /* Fetch the new RIP from the stack, tracking the new RSP as we pop. */
2628 VBOXSTRICTRC rcStrict;
2629 RTUINT64U NewRip;
2630 RTUINT64U NewRsp;
2631 NewRsp.u = pCtx->rsp;
2632 switch (enmEffOpSize)
2633 {
2634 case IEMMODE_16BIT:
2635 NewRip.u = 0;
2636 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2637 break;
2638 case IEMMODE_32BIT:
2639 NewRip.u = 0;
2640 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2641 break;
2642 case IEMMODE_64BIT:
2643 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2644 break;
2645 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2646 }
2647 if (rcStrict != VINF_SUCCESS)
2648 return rcStrict;
2649
2650     /* Check the new RIP before loading it. */
2651 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2652 * of it. The canonical test is performed here and for call. */
2653 if (enmEffOpSize != IEMMODE_64BIT)
2654 {
2655 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2656 {
2657 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2658 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2659 }
2660 }
2661 else
2662 {
2663 if (!IEM_IS_CANONICAL(NewRip.u))
2664 {
2665 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2666 return iemRaiseNotCanonical(pVCpu);
2667 }
2668 }
2669
2670 /* Apply cbPop */
2671 if (cbPop)
2672 iemRegAddToRspEx(pVCpu, pCtx, &NewRsp, cbPop);
2673
2674 /* Commit it. */
2675 pCtx->rip = NewRip.u;
2676 pCtx->rsp = NewRsp.u;
2677 pCtx->eflags.Bits.u1RF = 0;
2678
2679 /* Flush the prefetch buffer. */
2680#ifndef IEM_WITH_CODE_TLB
2681 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2682#endif
2683
2684 return VINF_SUCCESS;
2685}
2686
2687
2688/**
2689 * Implements enter.
2690 *
2691  * We're doing this in C because the instruction is insane; even for the
2692  * u8NestingLevel=0 case, dealing with the stack is tedious.
2693 *
2694 * @param enmEffOpSize The effective operand size.
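 * @param   cbFrame         The stack frame size to allocate (in bytes).
 * @param   cParameters     The number of stack parameters to copy (a.k.a. the
 *                          nesting level; only the low 5 bits are used).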
2695 */
2696IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2697{
2698 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2699
2700 /* Push RBP, saving the old value in TmpRbp. */
2701 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2702 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2703 RTUINT64U NewRbp;
2704 VBOXSTRICTRC rcStrict;
2705 if (enmEffOpSize == IEMMODE_64BIT)
2706 {
2707 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2708 NewRbp = NewRsp;
2709 }
2710 else if (enmEffOpSize == IEMMODE_32BIT)
2711 {
2712 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2713 NewRbp = NewRsp;
2714 }
2715 else
2716 {
2717 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2718 NewRbp = TmpRbp;
2719 NewRbp.Words.w0 = NewRsp.Words.w0;
2720 }
2721 if (rcStrict != VINF_SUCCESS)
2722 return rcStrict;
2723
2724 /* Copy the parameters (aka nesting levels by Intel). */
2725 cParameters &= 0x1f;
2726 if (cParameters > 0)
2727 {
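                 /* Copy cParameters stack slots from the previous frame (read downwards via TmpRbp) onto the new frame (pushed via NewRsp); the new frame pointer itself is pushed after the loop. */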
2728 switch (enmEffOpSize)
2729 {
2730 case IEMMODE_16BIT:
2731 if (pCtx->ss.Attr.n.u1DefBig)
2732 TmpRbp.DWords.dw0 -= 2;
2733 else
2734 TmpRbp.Words.w0 -= 2;
2735 do
2736 {
2737 uint16_t u16Tmp;
2738 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2739 if (rcStrict != VINF_SUCCESS)
2740 break;
2741 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2742 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2743 break;
2744
2745 case IEMMODE_32BIT:
2746 if (pCtx->ss.Attr.n.u1DefBig)
2747 TmpRbp.DWords.dw0 -= 4;
2748 else
2749 TmpRbp.Words.w0 -= 4;
2750 do
2751 {
2752 uint32_t u32Tmp;
2753 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2754 if (rcStrict != VINF_SUCCESS)
2755 break;
2756 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2757 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2758 break;
2759
2760 case IEMMODE_64BIT:
2761 TmpRbp.u -= 8;
2762 do
2763 {
2764 uint64_t u64Tmp;
2765 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2766 if (rcStrict != VINF_SUCCESS)
2767 break;
2768 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2769 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2770 break;
2771
2772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2773 }
2774 if (rcStrict != VINF_SUCCESS)
2775             return rcStrict;
2776
2777 /* Push the new RBP */
2778 if (enmEffOpSize == IEMMODE_64BIT)
2779 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2780 else if (enmEffOpSize == IEMMODE_32BIT)
2781 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2782 else
2783 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2784 if (rcStrict != VINF_SUCCESS)
2785 return rcStrict;
2786
2787 }
2788
2789 /* Recalc RSP. */
2790 iemRegSubFromRspEx(pVCpu, pCtx, &NewRsp, cbFrame);
2791
2792 /** @todo Should probe write access at the new RSP according to AMD. */
2793
2794 /* Commit it. */
2795 pCtx->rbp = NewRbp.u;
2796 pCtx->rsp = NewRsp.u;
2797 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2798
2799 return VINF_SUCCESS;
2800}
2801
2802
2803
2804/**
2805 * Implements leave.
2806 *
2807 * We're doing this in C because messing with the stack registers is annoying
2808  * since they depend on the SS attributes.
2809 *
2810 * @param enmEffOpSize The effective operand size.
2811 */
2812IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2813{
2814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2815
2816 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2817 RTUINT64U NewRsp;
2818 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2819 NewRsp.u = pCtx->rbp;
2820 else if (pCtx->ss.Attr.n.u1DefBig)
2821 NewRsp.u = pCtx->ebp;
2822 else
2823 {
2824         /** @todo Check that LEAVE actually preserves the high EBP bits. */
2825 NewRsp.u = pCtx->rsp;
2826 NewRsp.Words.w0 = pCtx->bp;
2827 }
2828
2829 /* Pop RBP according to the operand size. */
2830 VBOXSTRICTRC rcStrict;
2831 RTUINT64U NewRbp;
2832 switch (enmEffOpSize)
2833 {
2834 case IEMMODE_16BIT:
2835 NewRbp.u = pCtx->rbp;
2836 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2837 break;
2838 case IEMMODE_32BIT:
2839 NewRbp.u = 0;
2840 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2841 break;
2842 case IEMMODE_64BIT:
2843 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2844 break;
2845 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2846 }
2847 if (rcStrict != VINF_SUCCESS)
2848 return rcStrict;
2849
2850
2851 /* Commit it. */
2852 pCtx->rbp = NewRbp.u;
2853 pCtx->rsp = NewRsp.u;
2854 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2855
2856 return VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Implements int3 and int XX.
2862 *
2863 * @param u8Int The interrupt vector number.
2864 * @param enmInt The int instruction type.
2865 */
2866IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2867{
2868 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2869 return iemRaiseXcptOrInt(pVCpu,
2870 cbInstr,
2871 u8Int,
2872 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2873 0,
2874 0);
2875}
2876
2877
2878/**
2879 * Implements iret for real mode and V8086 mode.
2880 *
2881 * @param enmEffOpSize The effective operand size.
2882 */
2883IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2884{
2885 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2886 X86EFLAGS Efl;
2887 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
2888 NOREF(cbInstr);
2889
2890 /*
2891 * iret throws an exception if VME isn't enabled.
2892 */
2893 if ( Efl.Bits.u1VM
2894 && Efl.Bits.u2IOPL != 3
2895 && !(pCtx->cr4 & X86_CR4_VME))
2896 return iemRaiseGeneralProtectionFault0(pVCpu);
2897
2898 /*
2899 * Do the stack bits, but don't commit RSP before everything checks
2900 * out right.
2901 */
2902 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2903 VBOXSTRICTRC rcStrict;
2904 RTCPTRUNION uFrame;
2905 uint16_t uNewCs;
2906 uint32_t uNewEip;
2907 uint32_t uNewFlags;
2908 uint64_t uNewRsp;
2909 if (enmEffOpSize == IEMMODE_32BIT)
2910 {
2911 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914 uNewEip = uFrame.pu32[0];
2915 if (uNewEip > UINT16_MAX)
2916 return iemRaiseGeneralProtectionFault0(pVCpu);
2917
2918 uNewCs = (uint16_t)uFrame.pu32[1];
2919 uNewFlags = uFrame.pu32[2];
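                 /* Take only the flags a real-mode/V86 IRET may modify from the frame; VM, VIF, VIP and the fixed bit 1 are carried over from the current EFLAGS. */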
2920 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2921 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2922 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2923 | X86_EFL_ID;
2924 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2925 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2926 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2927 }
2928 else
2929 {
2930 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
2931 if (rcStrict != VINF_SUCCESS)
2932 return rcStrict;
2933 uNewEip = uFrame.pu16[0];
2934 uNewCs = uFrame.pu16[1];
2935 uNewFlags = uFrame.pu16[2];
2936 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2937 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2938 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2939 /** @todo The intel pseudo code does not indicate what happens to
2940 * reserved flags. We just ignore them. */
2941 /* Ancient CPU adjustments: See iemCImpl_popf. */
2942 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2943 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2944 }
2945 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2946 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2947 { /* extremely likely */ }
2948 else
2949 return rcStrict;
2950
2951 /** @todo Check how this is supposed to work if sp=0xfffe. */
2952 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2953 uNewCs, uNewEip, uNewFlags, uNewRsp));
2954
2955 /*
2956 * Check the limit of the new EIP.
2957 */
2958     /** @todo Only the AMD pseudo code checks the limit here; what's
2959      * right? */
2960 if (uNewEip > pCtx->cs.u32Limit)
2961 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2962
2963 /*
2964 * V8086 checks and flag adjustments
2965 */
2966 if (Efl.Bits.u1VM)
2967 {
2968 if (Efl.Bits.u2IOPL == 3)
2969 {
2970 /* Preserve IOPL and clear RF. */
2971 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2972 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2973 }
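             /* IOPL != 3 here, so CR4.VME is set (checked at the top): a 16-bit frame is still acceptable provided it does not set IF while VIP is pending and does not set TF; IF is then virtualized via VIF below. Anything else raises #GP(0). */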
2974 else if ( enmEffOpSize == IEMMODE_16BIT
2975 && ( !(uNewFlags & X86_EFL_IF)
2976 || !Efl.Bits.u1VIP )
2977 && !(uNewFlags & X86_EFL_TF) )
2978 {
2979 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2980 uNewFlags &= ~X86_EFL_VIF;
2981 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2982 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2983 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2984 }
2985 else
2986 return iemRaiseGeneralProtectionFault0(pVCpu);
2987 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2988 }
2989
2990 /*
2991 * Commit the operation.
2992 */
2993#ifdef DBGFTRACE_ENABLED
2994 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2995 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2996#endif
2997 pCtx->rsp = uNewRsp;
2998 pCtx->rip = uNewEip;
2999 pCtx->cs.Sel = uNewCs;
3000 pCtx->cs.ValidSel = uNewCs;
3001 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3002 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
3003 /** @todo do we load attribs and limit as well? */
3004 Assert(uNewFlags & X86_EFL_1);
3005 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3006
3007 /* Flush the prefetch buffer. */
3008#ifdef IEM_WITH_CODE_TLB
3009 pVCpu->iem.s.pbInstrBuf = NULL;
3010#else
3011 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3012#endif
3013
3014 return VINF_SUCCESS;
3015}
3016
3017
3018/**
3019 * Loads a segment register when entering V8086 mode.
3020 *
3021 * @param pSReg The segment register.
3022 * @param uSeg The segment to load.
3023 */
3024static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3025{
3026 pSReg->Sel = uSeg;
3027 pSReg->ValidSel = uSeg;
3028 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3029 pSReg->u64Base = (uint32_t)uSeg << 4;
3030 pSReg->u32Limit = 0xffff;
3031 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3032 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3033 * IRET'ing to V8086. */
3034}
3035
3036
3037/**
3038 * Implements iret for protected mode returning to V8086 mode.
3039 *
3040 * @param pCtx Pointer to the CPU context.
3041 * @param uNewEip The new EIP.
3042 * @param uNewCs The new CS.
3043 * @param uNewFlags The new EFLAGS.
3044 * @param uNewRsp The RSP after the initial IRET frame.
3045 *
3046  * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3047 */
3048IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
3049 uint32_t, uNewFlags, uint64_t, uNewRsp)
3050{
3051 RT_NOREF_PV(cbInstr);
3052
3053 /*
3054 * Pop the V8086 specific frame bits off the stack.
3055 */
3056 VBOXSTRICTRC rcStrict;
3057 RTCPTRUNION uFrame;
3058 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 24, &uFrame.pv, &uNewRsp);
3059 if (rcStrict != VINF_SUCCESS)
3060 return rcStrict;
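         /* Past EIP, CS and EFLAGS, the IRET-to-V8086 frame continues with ESP, SS, ES, DS, FS and GS, each in its own 32-bit slot. */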
3061 uint32_t uNewEsp = uFrame.pu32[0];
3062 uint16_t uNewSs = uFrame.pu32[1];
3063 uint16_t uNewEs = uFrame.pu32[2];
3064 uint16_t uNewDs = uFrame.pu32[3];
3065 uint16_t uNewFs = uFrame.pu32[4];
3066 uint16_t uNewGs = uFrame.pu32[5];
3067 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3068 if (rcStrict != VINF_SUCCESS)
3069 return rcStrict;
3070
3071 /*
3072 * Commit the operation.
3073 */
3074 uNewFlags &= X86_EFL_LIVE_MASK;
3075 uNewFlags |= X86_EFL_RA1_MASK;
3076#ifdef DBGFTRACE_ENABLED
3077 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3078 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3079#endif
3080 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3081
3082 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3083 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
3084 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
3085 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
3086 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
3087 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
3088 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
3089 pCtx->rip = (uint16_t)uNewEip;
3090 pCtx->rsp = uNewEsp; /** @todo check this out! */
3091 pVCpu->iem.s.uCpl = 3;
3092
3093 /* Flush the prefetch buffer. */
3094#ifdef IEM_WITH_CODE_TLB
3095 pVCpu->iem.s.pbInstrBuf = NULL;
3096#else
3097 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3098#endif
3099
3100 return VINF_SUCCESS;
3101}
3102
3103
3104/**
3105 * Implements iret for protected mode returning via a nested task.
3106 *
3107 * @param enmEffOpSize The effective operand size.
3108 */
3109IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3110{
3111 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3112#ifndef IEM_IMPLEMENTS_TASKSWITCH
3113 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3114#else
3115 RT_NOREF_PV(enmEffOpSize);
3116
3117 /*
3118 * Read the segment selector in the link-field of the current TSS.
3119 */
3120 RTSEL uSelRet;
3121 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3122 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
3123 if (rcStrict != VINF_SUCCESS)
3124 return rcStrict;
3125
3126 /*
3127 * Fetch the returning task's TSS descriptor from the GDT.
3128 */
3129 if (uSelRet & X86_SEL_LDT)
3130 {
3131 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3132 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3133 }
3134
3135 IEMSELDESC TssDesc;
3136 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3137 if (rcStrict != VINF_SUCCESS)
3138 return rcStrict;
3139
3140 if (TssDesc.Legacy.Gate.u1DescType)
3141 {
3142 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3143 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3144 }
3145
3146 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3147 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3148 {
3149 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3150 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3151 }
3152
3153 if (!TssDesc.Legacy.Gate.u1Present)
3154 {
3155 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3156 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3157 }
3158
3159 uint32_t uNextEip = pCtx->eip + cbInstr;
3160 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3161 0 /* uCr2 */, uSelRet, &TssDesc);
3162#endif
3163}
3164
3165
3166/**
3167 * Implements iret for protected mode
3168 *
3169 * @param enmEffOpSize The effective operand size.
3170 */
3171IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3172{
3173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3174 NOREF(cbInstr);
3175 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3176
3177 /*
3178 * Nested task return.
3179 */
3180 if (pCtx->eflags.Bits.u1NT)
3181 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3182
3183 /*
3184 * Normal return.
3185 *
3186 * Do the stack bits, but don't commit RSP before everything checks
3187 * out right.
3188 */
3189 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3190 VBOXSTRICTRC rcStrict;
3191 RTCPTRUNION uFrame;
3192 uint16_t uNewCs;
3193 uint32_t uNewEip;
3194 uint32_t uNewFlags;
3195 uint64_t uNewRsp;
3196 if (enmEffOpSize == IEMMODE_32BIT)
3197 {
3198 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
3199 if (rcStrict != VINF_SUCCESS)
3200 return rcStrict;
3201 uNewEip = uFrame.pu32[0];
3202 uNewCs = (uint16_t)uFrame.pu32[1];
3203 uNewFlags = uFrame.pu32[2];
3204 }
3205 else
3206 {
3207 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
3208 if (rcStrict != VINF_SUCCESS)
3209 return rcStrict;
3210 uNewEip = uFrame.pu16[0];
3211 uNewCs = uFrame.pu16[1];
3212 uNewFlags = uFrame.pu16[2];
3213 }
3214 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3215 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3216 { /* extremely likely */ }
3217 else
3218 return rcStrict;
3219 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3220
3221 /*
3222 * We're hopefully not returning to V8086 mode...
3223 */
3224 if ( (uNewFlags & X86_EFL_VM)
3225 && pVCpu->iem.s.uCpl == 0)
3226 {
3227 Assert(enmEffOpSize == IEMMODE_32BIT);
3228 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3229 }
3230
3231 /*
3232 * Protected mode.
3233 */
3234 /* Read the CS descriptor. */
3235 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3236 {
3237 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3238 return iemRaiseGeneralProtectionFault0(pVCpu);
3239 }
3240
3241 IEMSELDESC DescCS;
3242 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3243 if (rcStrict != VINF_SUCCESS)
3244 {
3245 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3246 return rcStrict;
3247 }
3248
3249 /* Must be a code descriptor. */
3250 if (!DescCS.Legacy.Gen.u1DescType)
3251 {
3252 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3253 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3254 }
3255 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3256 {
3257 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3258 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3259 }
3260
3261#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3262 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3263 PVM pVM = pVCpu->CTX_SUFF(pVM);
3264 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3265 {
3266 if ((uNewCs & X86_SEL_RPL) == 1)
3267 {
3268 if ( pVCpu->iem.s.uCpl == 0
3269 && ( !EMIsRawRing1Enabled(pVM)
3270 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3271 {
3272 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3273 uNewCs &= X86_SEL_MASK_OFF_RPL;
3274 }
3275# ifdef LOG_ENABLED
3276 else if (pVCpu->iem.s.uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3277 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3278# endif
3279 }
3280 else if ( (uNewCs & X86_SEL_RPL) == 2
3281 && EMIsRawRing1Enabled(pVM)
3282 && pVCpu->iem.s.uCpl <= 1)
3283 {
3284 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3285 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3286 }
3287 }
3288#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3289
3290
3291 /* Privilege checks. */
3292 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3293 {
3294 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3295 {
3296 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3297 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3298 }
3299 }
3300 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3301 {
3302 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3303 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3304 }
3305 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3306 {
3307 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3308 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3309 }
3310
3311 /* Present? */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3316 }
3317
3318 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3319
3320 /*
3321 * Return to outer level?
3322 */
3323 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3324 {
3325 uint16_t uNewSS;
3326 uint32_t uNewESP;
3327 if (enmEffOpSize == IEMMODE_32BIT)
3328 {
3329 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &uFrame.pv, &uNewRsp);
3330 if (rcStrict != VINF_SUCCESS)
3331 return rcStrict;
3332/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3333 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3334 * bit of the popped SS selector it turns out. */
3335 uNewESP = uFrame.pu32[0];
3336 uNewSS = (uint16_t)uFrame.pu32[1];
3337 }
3338 else
3339 {
3340 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 4, &uFrame.pv, &uNewRsp);
3341 if (rcStrict != VINF_SUCCESS)
3342 return rcStrict;
3343 uNewESP = uFrame.pu16[0];
3344 uNewSS = uFrame.pu16[1];
3345 }
3346 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3347 if (rcStrict != VINF_SUCCESS)
3348 return rcStrict;
3349 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3350
3351 /* Read the SS descriptor. */
3352 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3353 {
3354 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3355 return iemRaiseGeneralProtectionFault0(pVCpu);
3356 }
3357
3358 IEMSELDESC DescSS;
3359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3360 if (rcStrict != VINF_SUCCESS)
3361 {
3362 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3363 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3364 return rcStrict;
3365 }
3366
3367 /* Privilege checks. */
3368 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3369 {
3370 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3371 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3372 }
3373 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3374 {
3375 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3376 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3377 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3378 }
3379
3380 /* Must be a writeable data segment descriptor. */
3381 if (!DescSS.Legacy.Gen.u1DescType)
3382 {
3383 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3384 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3385 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3386 }
3387 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3388 {
3389 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3390 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3391 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3392 }
3393
3394 /* Present? */
3395 if (!DescSS.Legacy.Gen.u1Present)
3396 {
3397 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3398 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3399 }
3400
3401 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3402
3403 /* Check EIP. */
3404 if (uNewEip > cbLimitCS)
3405 {
3406 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3407 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3408 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3409 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3410 }
3411
3412 /*
3413 * Commit the changes, marking CS and SS accessed first since
3414 * that may fail.
3415 */
3416 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3417 {
3418 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3419 if (rcStrict != VINF_SUCCESS)
3420 return rcStrict;
3421 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3422 }
3423 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3424 {
3425 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3426 if (rcStrict != VINF_SUCCESS)
3427 return rcStrict;
3428 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3429 }
3430
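        /* The set of EFLAGS bits IRET restores depends on privilege: IF only
           when CPL <= IOPL, and IOPL/VIF/VIP only at CPL 0.  RF, AC and ID are
           only restored with a 32-bit operand size, and AC/ID/VIF/VIP not at
           all when targeting a 386-class or older CPU. */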
3431 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3432 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3433 if (enmEffOpSize != IEMMODE_16BIT)
3434 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3435 if (pVCpu->iem.s.uCpl == 0)
3436 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3437 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3438 fEFlagsMask |= X86_EFL_IF;
3439 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3440 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3441 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3442 fEFlagsNew &= ~fEFlagsMask;
3443 fEFlagsNew |= uNewFlags & fEFlagsMask;
3444#ifdef DBGFTRACE_ENABLED
3445 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3446 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3447 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3448#endif
3449
3450 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3451 pCtx->rip = uNewEip;
3452 pCtx->cs.Sel = uNewCs;
3453 pCtx->cs.ValidSel = uNewCs;
3454 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3455 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3456 pCtx->cs.u32Limit = cbLimitCS;
3457 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3458 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3459
3460 pCtx->ss.Sel = uNewSS;
3461 pCtx->ss.ValidSel = uNewSS;
3462 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3463 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3464 pCtx->ss.u32Limit = cbLimitSs;
3465 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3466 if (!pCtx->ss.Attr.n.u1DefBig)
3467 pCtx->sp = (uint16_t)uNewESP;
3468 else
3469 pCtx->rsp = uNewESP;
3470
3471 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3472 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3473 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3474 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3475 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3476
3477 /* Done! */
3478
3479 }
3480 /*
3481 * Return to the same level.
3482 */
3483 else
3484 {
3485 /* Check EIP. */
3486 if (uNewEip > cbLimitCS)
3487 {
3488 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3489 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3490 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3491 }
3492
3493 /*
3494 * Commit the changes, marking CS first since it may fail.
3495 */
3496 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3497 {
3498 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3499 if (rcStrict != VINF_SUCCESS)
3500 return rcStrict;
3501 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3502 }
3503
3504 X86EFLAGS NewEfl;
3505 NewEfl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
3506 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3507 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3508 if (enmEffOpSize != IEMMODE_16BIT)
3509 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3510 if (pVCpu->iem.s.uCpl == 0)
3511 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3512 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3513 fEFlagsMask |= X86_EFL_IF;
3514 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3515 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3516 NewEfl.u &= ~fEFlagsMask;
3517 NewEfl.u |= fEFlagsMask & uNewFlags;
3518#ifdef DBGFTRACE_ENABLED
3519 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3520 pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip,
3521 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3522#endif
3523
3524 IEMMISC_SET_EFL(pVCpu, pCtx, NewEfl.u);
3525 pCtx->rip = uNewEip;
3526 pCtx->cs.Sel = uNewCs;
3527 pCtx->cs.ValidSel = uNewCs;
3528 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3529 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3530 pCtx->cs.u32Limit = cbLimitCS;
3531 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3532 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3533 if (!pCtx->ss.Attr.n.u1DefBig)
3534 pCtx->sp = (uint16_t)uNewRsp;
3535 else
3536 pCtx->rsp = uNewRsp;
3537 /* Done! */
3538 }
3539
3540 /* Flush the prefetch buffer. */
3541#ifdef IEM_WITH_CODE_TLB
3542 pVCpu->iem.s.pbInstrBuf = NULL;
3543#else
3544 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3545#endif
3546
3547 return VINF_SUCCESS;
3548}
3549
3550
3551/**
3552 * Implements iret for long mode
3553 *
3554 * @param enmEffOpSize The effective operand size.
3555 */
3556IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3557{
3558 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3559 NOREF(cbInstr);
3560
3561 /*
3562 * Nested task return is not supported in long mode.
3563 */
3564 if (pCtx->eflags.Bits.u1NT)
3565 {
3566 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3567 return iemRaiseGeneralProtectionFault0(pVCpu);
3568 }
3569
3570 /*
3571 * Normal return.
3572 *
3573 * Do the stack bits, but don't commit RSP before everything checks
3574 * out right.
3575 */
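    /* The IRET frame popped below always holds five values: RIP, CS, RFLAGS,
       RSP and SS, each occupying one slot of the effective operand size
       (8, 4 or 2 bytes). */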
3576 VBOXSTRICTRC rcStrict;
3577 RTCPTRUNION uFrame;
3578 uint64_t uNewRip;
3579 uint16_t uNewCs;
3580 uint16_t uNewSs;
3581 uint32_t uNewFlags;
3582 uint64_t uNewRsp;
3583 if (enmEffOpSize == IEMMODE_64BIT)
3584 {
3585 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);
3586 if (rcStrict != VINF_SUCCESS)
3587 return rcStrict;
3588 uNewRip = uFrame.pu64[0];
3589 uNewCs = (uint16_t)uFrame.pu64[1];
3590 uNewFlags = (uint32_t)uFrame.pu64[2];
3591 uNewRsp = uFrame.pu64[3];
3592 uNewSs = (uint16_t)uFrame.pu64[4];
3593 }
3594 else if (enmEffOpSize == IEMMODE_32BIT)
3595 {
3596 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);
3597 if (rcStrict != VINF_SUCCESS)
3598 return rcStrict;
3599 uNewRip = uFrame.pu32[0];
3600 uNewCs = (uint16_t)uFrame.pu32[1];
3601 uNewFlags = uFrame.pu32[2];
3602 uNewRsp = uFrame.pu32[3];
3603 uNewSs = (uint16_t)uFrame.pu32[4];
3604 }
3605 else
3606 {
3607 Assert(enmEffOpSize == IEMMODE_16BIT);
3608 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);
3609 if (rcStrict != VINF_SUCCESS)
3610 return rcStrict;
3611 uNewRip = uFrame.pu16[0];
3612 uNewCs = uFrame.pu16[1];
3613 uNewFlags = uFrame.pu16[2];
3614 uNewRsp = uFrame.pu16[3];
3615 uNewSs = uFrame.pu16[4];
3616 }
3617 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3618 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3619        { /* extremely likely */ }
3620 else
3621 return rcStrict;
3622 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3623
3624 /*
3625 * Check stuff.
3626 */
3627 /* Read the CS descriptor. */
3628 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3629 {
3630 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3631 return iemRaiseGeneralProtectionFault0(pVCpu);
3632 }
3633
3634 IEMSELDESC DescCS;
3635 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3636 if (rcStrict != VINF_SUCCESS)
3637 {
3638 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3639 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3640 return rcStrict;
3641 }
3642
3643 /* Must be a code descriptor. */
3644 if ( !DescCS.Legacy.Gen.u1DescType
3645 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3646 {
3647        Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u 4T=%#x -> #GP\n",
3648 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3649 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3650 }
3651
3652 /* Privilege checks. */
3653 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3654 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3655 {
3656 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3657 {
3658 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3659 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3660 }
3661 }
3662 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3663 {
3664 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3665 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3666 }
3667 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3668 {
3669 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3670 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3671 }
3672
3673 /* Present? */
3674 if (!DescCS.Legacy.Gen.u1Present)
3675 {
3676 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3677 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3678 }
3679
3680 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3681
3682 /* Read the SS descriptor. */
3683 IEMSELDESC DescSS;
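    /* A NULL SS is tolerated when returning to 64-bit code at CPL 0 thru 2,
       since 64-bit mode does not use the SS base/limit; the descriptor is
       zeroed here and SS is later loaded as an unusable, flat selector. */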
3684 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3685 {
3686 if ( !DescCS.Legacy.Gen.u1Long
3687 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3688 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3689 {
3690 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3691 return iemRaiseGeneralProtectionFault0(pVCpu);
3692 }
3693 DescSS.Legacy.u = 0;
3694 }
3695 else
3696 {
3697 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3698 if (rcStrict != VINF_SUCCESS)
3699 {
3700 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3701 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3702 return rcStrict;
3703 }
3704 }
3705
3706 /* Privilege checks. */
3707 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3708 {
3709 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3710 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3711 }
3712
3713 uint32_t cbLimitSs;
3714 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3715 cbLimitSs = UINT32_MAX;
3716 else
3717 {
3718 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3719 {
3720 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3721 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3722 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3723 }
3724
3725 /* Must be a writeable data segment descriptor. */
3726 if (!DescSS.Legacy.Gen.u1DescType)
3727 {
3728 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3729 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3730 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3731 }
3732 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3733 {
3734 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3735 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3736 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3737 }
3738
3739 /* Present? */
3740 if (!DescSS.Legacy.Gen.u1Present)
3741 {
3742 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3743 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3744 }
3745 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3746 }
3747
3748 /* Check EIP. */
3749 if (DescCS.Legacy.Gen.u1Long)
3750 {
3751 if (!IEM_IS_CANONICAL(uNewRip))
3752 {
3753 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3754 uNewCs, uNewRip, uNewSs, uNewRsp));
3755 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3756 }
3757 }
3758 else
3759 {
3760 if (uNewRip > cbLimitCS)
3761 {
3762 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3763 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3764 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3765 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3766 }
3767 }
3768
3769 /*
3770 * Commit the changes, marking CS and SS accessed first since
3771 * that may fail.
3772 */
3773 /** @todo where exactly are these actually marked accessed by a real CPU? */
3774 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3775 {
3776 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3777 if (rcStrict != VINF_SUCCESS)
3778 return rcStrict;
3779 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3780 }
3781 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3782 {
3783 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3784 if (rcStrict != VINF_SUCCESS)
3785 return rcStrict;
3786 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3787 }
3788
3789 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3790 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3791 if (enmEffOpSize != IEMMODE_16BIT)
3792 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3793 if (pVCpu->iem.s.uCpl == 0)
3794 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3795 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3796 fEFlagsMask |= X86_EFL_IF;
3797 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3798 fEFlagsNew &= ~fEFlagsMask;
3799 fEFlagsNew |= uNewFlags & fEFlagsMask;
3800#ifdef DBGFTRACE_ENABLED
3801 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3802 pVCpu->iem.s.uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3803#endif
3804
3805 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3806 pCtx->rip = uNewRip;
3807 pCtx->cs.Sel = uNewCs;
3808 pCtx->cs.ValidSel = uNewCs;
3809 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3810 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3811 pCtx->cs.u32Limit = cbLimitCS;
3812 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3813 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3814 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3815 pCtx->rsp = uNewRsp;
3816 else
3817 pCtx->sp = (uint16_t)uNewRsp;
3818 pCtx->ss.Sel = uNewSs;
3819 pCtx->ss.ValidSel = uNewSs;
3820 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3821 {
3822 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3823 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3824 pCtx->ss.u32Limit = UINT32_MAX;
3825 pCtx->ss.u64Base = 0;
3826 Log2(("iretq new SS: NULL\n"));
3827 }
3828 else
3829 {
3830 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3831 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3832 pCtx->ss.u32Limit = cbLimitSs;
3833 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3834 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3835 }
3836
3837 if (pVCpu->iem.s.uCpl != uNewCpl)
3838 {
3839 pVCpu->iem.s.uCpl = uNewCpl;
3840 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->ds);
3841 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->es);
3842 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->fs);
3843 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->gs);
3844 }
3845
3846 /* Flush the prefetch buffer. */
3847#ifdef IEM_WITH_CODE_TLB
3848 pVCpu->iem.s.pbInstrBuf = NULL;
3849#else
3850 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3851#endif
3852
3853 return VINF_SUCCESS;
3854}
3855
3856
3857/**
3858 * Implements iret.
3859 *
3860 * @param enmEffOpSize The effective operand size.
3861 */
3862IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3863{
3864 /*
3865 * First, clear NMI blocking, if any, before causing any exceptions.
3866 */
3867 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3868
3869 /*
3870 * The SVM nested-guest intercept for iret takes priority over all exceptions,
3871 * see AMD spec. "15.9 Instruction Intercepts".
3872 */
3873 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3874 {
3875 Log(("iret: Guest intercept -> #VMEXIT\n"));
3876 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3877 }
3878
3879 /*
3880 * Call a mode specific worker.
3881 */
3882 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3883 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3884 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3885 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3886 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3887}
3888
3889
3890/**
3891 * Implements SYSCALL (AMD and Intel64).
3894 */
3895IEM_CIMPL_DEF_0(iemCImpl_syscall)
3896{
3897 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3898
3899 /*
3900 * Check preconditions.
3901 *
3902 * Note that CPUs described in the documentation may load a few odd values
3903 * into CS and SS beyond what we allow here. This has yet to be checked on real
3904 * hardware.
3905 */
3906 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3907 {
3908 Log(("syscall: Not enabled in EFER -> #UD\n"));
3909 return iemRaiseUndefinedOpcode(pVCpu);
3910 }
3911 if (!(pCtx->cr0 & X86_CR0_PE))
3912 {
3913 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3914 return iemRaiseGeneralProtectionFault0(pVCpu);
3915 }
3916 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3917 {
3918 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3919 return iemRaiseUndefinedOpcode(pVCpu);
3920 }
3921
3922 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3923 /** @todo what about LDT selectors? Shouldn't matter, really. */
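    /* SYSCALL takes its target selectors from MSR_STAR[47:32]: CS is that
       value with the RPL bits masked off, and SS is assumed to be CS + 8,
       which is how both AMD and Intel document the instruction. */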
3924 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3925 uint16_t uNewSs = uNewCs + 8;
3926 if (uNewCs == 0 || uNewSs == 0)
3927 {
3928 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3929 return iemRaiseGeneralProtectionFault0(pVCpu);
3930 }
3931
3932 /* Long mode and legacy mode differs. */
3933 if (CPUMIsGuestInLongModeEx(pCtx))
3934 {
3935        uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3936
3937 /* This test isn't in the docs, but I'm not trusting the guys writing
3938 the MSRs to have validated the values as canonical like they should. */
3939 if (!IEM_IS_CANONICAL(uNewRip))
3940 {
3941            Log(("syscall: New RIP %#RX64 is not canonical -> #UD\n", uNewRip));
3942 return iemRaiseUndefinedOpcode(pVCpu);
3943 }
3944
3945 /*
3946 * Commit it.
3947 */
3948        Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3949 pCtx->rcx = pCtx->rip + cbInstr;
3950 pCtx->rip = uNewRip;
3951
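        /* RF is cleared before the old RFLAGS value is saved to R11; any bit
           set in MSR_SFMASK is then cleared from the new RFLAGS, with the
           reserved bit 1 kept set.  (The return RIP was stashed in RCX above.) */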
3952 pCtx->rflags.u &= ~X86_EFL_RF;
3953 pCtx->r11 = pCtx->rflags.u;
3954 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3955 pCtx->rflags.u |= X86_EFL_1;
3956
3957 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3958 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3959 }
3960 else
3961 {
3962 /*
3963 * Commit it.
3964 */
3965 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3966             pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3967 pCtx->rcx = pCtx->eip + cbInstr;
3968 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3969 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3970
3971 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3972 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3973 }
3974 pCtx->cs.Sel = uNewCs;
3975 pCtx->cs.ValidSel = uNewCs;
3976 pCtx->cs.u64Base = 0;
3977 pCtx->cs.u32Limit = UINT32_MAX;
3978 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3979
3980 pCtx->ss.Sel = uNewSs;
3981 pCtx->ss.ValidSel = uNewSs;
3982 pCtx->ss.u64Base = 0;
3983 pCtx->ss.u32Limit = UINT32_MAX;
3984 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3985
3986 /* Flush the prefetch buffer. */
3987#ifdef IEM_WITH_CODE_TLB
3988 pVCpu->iem.s.pbInstrBuf = NULL;
3989#else
3990 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3991#endif
3992
3993 return VINF_SUCCESS;
3994}
3995
3996
3997/**
3998 * Implements SYSRET (AMD and Intel64).
3999 */
4000IEM_CIMPL_DEF_0(iemCImpl_sysret)
4001
4002{
4003 RT_NOREF_PV(cbInstr);
4004 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4005
4006 /*
4007 * Check preconditions.
4008 *
4009 * Note that CPUs described in the documentation may load a few odd values
4010 * into CS and SS beyond what we allow here. This has yet to be checked on real
4011 * hardware.
4012 */
4013 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
4014 {
4015 Log(("sysret: Not enabled in EFER -> #UD\n"));
4016 return iemRaiseUndefinedOpcode(pVCpu);
4017 }
4018 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
4019 {
4020 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4021 return iemRaiseUndefinedOpcode(pVCpu);
4022 }
4023 if (!(pCtx->cr0 & X86_CR0_PE))
4024 {
4025 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4026 return iemRaiseGeneralProtectionFault0(pVCpu);
4027 }
4028 if (pVCpu->iem.s.uCpl != 0)
4029 {
4030 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4031 return iemRaiseGeneralProtectionFault0(pVCpu);
4032 }
4033
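    /* SYSRET derives its selectors from MSR_STAR[63:48]: SS is that value + 8,
       while CS is the value itself for a 32-bit return and the value + 16 for
       a 64-bit return; RPL 3 is OR'ed in when the registers are committed. */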
4034 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4035 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4036 uint16_t uNewSs = uNewCs + 8;
4037 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4038 uNewCs += 16;
4039 if (uNewCs == 0 || uNewSs == 0)
4040 {
4041 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4042 return iemRaiseGeneralProtectionFault0(pVCpu);
4043 }
4044
4045 /*
4046 * Commit it.
4047 */
4048 if (CPUMIsGuestInLongModeEx(pCtx))
4049 {
4050 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4051 {
4052 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
4053                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
4054            /* Note! We disregard the Intel manual regarding the RCX canonical
4055               check; ask Intel+Xen why AMD doesn't do it. */
4056 pCtx->rip = pCtx->rcx;
4057 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4058 | (3 << X86DESCATTR_DPL_SHIFT);
4059 }
4060 else
4061 {
4062 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
4063                 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
4064 pCtx->rip = pCtx->ecx;
4065 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4066 | (3 << X86DESCATTR_DPL_SHIFT);
4067 }
4068 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4069 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
4070 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4071 pCtx->rflags.u |= X86_EFL_1;
4072 }
4073 else
4074 {
4075        Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
4076 pCtx->rip = pCtx->rcx;
4077 pCtx->rflags.u |= X86_EFL_IF;
4078 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4079 | (3 << X86DESCATTR_DPL_SHIFT);
4080 }
4081 pCtx->cs.Sel = uNewCs | 3;
4082 pCtx->cs.ValidSel = uNewCs | 3;
4083 pCtx->cs.u64Base = 0;
4084 pCtx->cs.u32Limit = UINT32_MAX;
4085 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4086
4087 pCtx->ss.Sel = uNewSs | 3;
4088 pCtx->ss.ValidSel = uNewSs | 3;
4089 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4090    /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4091 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4092 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4093 * on sysret. */
4094
4095 /* Flush the prefetch buffer. */
4096#ifdef IEM_WITH_CODE_TLB
4097 pVCpu->iem.s.pbInstrBuf = NULL;
4098#else
4099 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4100#endif
4101
4102 return VINF_SUCCESS;
4103}
4104
4105
4106/**
4107 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4108 *
4109 * @param iSegReg The segment register number (valid).
4110 * @param uSel The new selector value.
4111 */
4112IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4113{
4114 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4115 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4116 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4117
4118 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4119
4120 /*
4121 * Real mode and V8086 mode are easy.
4122 */
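    /* In real and V8086 mode the base is simply the selector shifted left by
       four (e.g. a selector of 0x1234 yields a base of 0x12340); the cached
       limit and attributes are left untouched, see the note below. */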
4123 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
4124 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
4125 {
4126 *pSel = uSel;
4127 pHid->u64Base = (uint32_t)uSel << 4;
4128 pHid->ValidSel = uSel;
4129 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4130#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4131 /** @todo Does the CPU actually load limits and attributes in the
4132 * real/V8086 mode segment load case? It doesn't for CS in far
4133 * jumps... Affects unreal mode. */
4134 pHid->u32Limit = 0xffff;
4135 pHid->Attr.u = 0;
4136 pHid->Attr.n.u1Present = 1;
4137 pHid->Attr.n.u1DescType = 1;
4138 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4139 ? X86_SEL_TYPE_RW
4140 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4141#endif
4142 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4143 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4144 return VINF_SUCCESS;
4145 }
4146
4147 /*
4148 * Protected mode.
4149 *
4150 * Check if it's a null segment selector value first, that's OK for DS, ES,
4151 * FS and GS. If not null, then we have to load and parse the descriptor.
4152 */
4153 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4154 {
4155 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4156 if (iSegReg == X86_SREG_SS)
4157 {
4158 /* In 64-bit kernel mode, the stack can be 0 because of the way
4159               interrupts are dispatched. AMD seems to have a slightly more
4160 relaxed relationship to SS.RPL than intel does. */
4161 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4162 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4163 || pVCpu->iem.s.uCpl > 2
4164 || ( uSel != pVCpu->iem.s.uCpl
4165 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4166 {
4167 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4168 return iemRaiseGeneralProtectionFault0(pVCpu);
4169 }
4170 }
4171
4172 *pSel = uSel; /* Not RPL, remember :-) */
4173 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4174 if (iSegReg == X86_SREG_SS)
4175 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4176
4177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4178 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4179
4180 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4181 return VINF_SUCCESS;
4182 }
4183
4184 /* Fetch the descriptor. */
4185 IEMSELDESC Desc;
4186 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4187 if (rcStrict != VINF_SUCCESS)
4188 return rcStrict;
4189
4190 /* Check GPs first. */
4191 if (!Desc.Legacy.Gen.u1DescType)
4192 {
4193 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4194 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4195 }
4196 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4197 {
4198 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4199 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4200 {
4201 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4202 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4203 }
4204 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4205 {
4206 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4207 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4208 }
4209 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4210 {
4211 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4212 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4213 }
4214 }
4215 else
4216 {
4217 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4218 {
4219 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4220 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4221 }
4222 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4223 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4224 {
4225#if 0 /* this is what intel says. */
4226 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4227 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4228 {
4229 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4230 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4231 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4232 }
4233#else /* this is what makes more sense. */
4234 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4235 {
4236 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4237 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4238 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4239 }
4240 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4241 {
4242 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4243 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4244 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4245 }
4246#endif
4247 }
4248 }
4249
4250 /* Is it there? */
4251 if (!Desc.Legacy.Gen.u1Present)
4252 {
4253 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4254 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4255 }
4256
4257 /* The base and limit. */
4258 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4259 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4260
4261 /*
4262 * Ok, everything checked out fine. Now set the accessed bit before
4263 * committing the result into the registers.
4264 */
4265 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4266 {
4267 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4268 if (rcStrict != VINF_SUCCESS)
4269 return rcStrict;
4270 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4271 }
4272
4273 /* commit */
4274 *pSel = uSel;
4275 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4276 pHid->u32Limit = cbLimit;
4277 pHid->u64Base = u64Base;
4278 pHid->ValidSel = uSel;
4279 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4280
4281 /** @todo check if the hidden bits are loaded correctly for 64-bit
4282 * mode. */
4283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4284
4285 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4286 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4287 return VINF_SUCCESS;
4288}
4289
4290
4291/**
4292 * Implements 'mov SReg, r/m'.
4293 *
4294 * @param iSegReg The segment register number (valid).
4295 * @param uSel The new selector value.
4296 */
4297IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4298{
4299 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4300 if (rcStrict == VINF_SUCCESS)
4301 {
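        /* A successful load of SS starts an interrupt shadow: external
           interrupts are inhibited on the following instruction, which
           EMSetInhibitInterruptsPC tracks by recording the new RIP. */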
4302 if (iSegReg == X86_SREG_SS)
4303 {
4304 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4305 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4306 }
4307 }
4308 return rcStrict;
4309}
4310
4311
4312/**
4313 * Implements 'pop SReg'.
4314 *
4315 * @param iSegReg The segment register number (valid).
4316 * @param enmEffOpSize The effective operand size (valid).
4317 */
4318IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4319{
4320 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4321 VBOXSTRICTRC rcStrict;
4322
4323 /*
4324 * Read the selector off the stack and join paths with mov ss, reg.
4325 */
4326 RTUINT64U TmpRsp;
4327 TmpRsp.u = pCtx->rsp;
4328 switch (enmEffOpSize)
4329 {
4330 case IEMMODE_16BIT:
4331 {
4332 uint16_t uSel;
4333 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4334 if (rcStrict == VINF_SUCCESS)
4335 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4336 break;
4337 }
4338
4339 case IEMMODE_32BIT:
4340 {
4341 uint32_t u32Value;
4342 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4343 if (rcStrict == VINF_SUCCESS)
4344 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4345 break;
4346 }
4347
4348 case IEMMODE_64BIT:
4349 {
4350 uint64_t u64Value;
4351 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4352 if (rcStrict == VINF_SUCCESS)
4353 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4354 break;
4355 }
4356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4357 }
4358
4359 /*
4360 * Commit the stack on success.
4361 */
4362 if (rcStrict == VINF_SUCCESS)
4363 {
4364 pCtx->rsp = TmpRsp.u;
4365 if (iSegReg == X86_SREG_SS)
4366 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4367 }
4368 return rcStrict;
4369}
4370
4371
4372/**
4373 * Implements lgs, lfs, les, lds & lss.
4374 */
4375IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4376 uint16_t, uSel,
4377 uint64_t, offSeg,
4378 uint8_t, iSegReg,
4379 uint8_t, iGReg,
4380 IEMMODE, enmEffOpSize)
4381{
4382 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4383 VBOXSTRICTRC rcStrict;
4384
4385 /*
4386 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4387 */
4388    /** @todo verify and test that mov, pop and lXs do the segment
4389     * register loading in exactly the same way. */
4390 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4391 if (rcStrict == VINF_SUCCESS)
4392 {
4393 switch (enmEffOpSize)
4394 {
4395 case IEMMODE_16BIT:
4396 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4397 break;
4398 case IEMMODE_32BIT:
4399 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4400 break;
4401 case IEMMODE_64BIT:
4402 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4403 break;
4404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4405 }
4406 }
4407
4408 return rcStrict;
4409}
4410
4411
4412/**
4413 * Helper for VERR, VERW, LAR, and LSL; loads the descriptor from memory.
4414 *
4415 * @retval VINF_SUCCESS on success.
4416 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4417 * @retval iemMemFetchSysU64 return value.
4418 *
4419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4420 * @param uSel The selector value.
4421 * @param fAllowSysDesc Whether system descriptors are OK or not.
4422 * @param pDesc Where to return the descriptor on success.
4423 */
4424static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPU pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4425{
4426 pDesc->Long.au64[0] = 0;
4427 pDesc->Long.au64[1] = 0;
4428
4429 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4430 return VINF_IEM_SELECTOR_NOT_OK;
4431
4432 /* Within the table limits? */
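    /* The limit checks below OR X86_SEL_RPL_LDT (7) into the selector so the
       comparison covers the last byte of the 8-byte descriptor entry rather
       than just its first byte. */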
4433 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4434 RTGCPTR GCPtrBase;
4435 if (uSel & X86_SEL_LDT)
4436 {
4437 if ( !pCtx->ldtr.Attr.n.u1Present
4438 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4439 return VINF_IEM_SELECTOR_NOT_OK;
4440 GCPtrBase = pCtx->ldtr.u64Base;
4441 }
4442 else
4443 {
4444 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4445 return VINF_IEM_SELECTOR_NOT_OK;
4446 GCPtrBase = pCtx->gdtr.pGdt;
4447 }
4448
4449 /* Fetch the descriptor. */
4450 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4451 if (rcStrict != VINF_SUCCESS)
4452 return rcStrict;
4453 if (!pDesc->Legacy.Gen.u1DescType)
4454 {
4455 if (!fAllowSysDesc)
4456 return VINF_IEM_SELECTOR_NOT_OK;
4457 if (CPUMIsGuestInLongModeEx(pCtx))
4458 {
4459 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4460 if (rcStrict != VINF_SUCCESS)
4461 return rcStrict;
4462 }
4463
4464 }
4465
4466 return VINF_SUCCESS;
4467}
4468
4469
4470/**
4471 * Implements verr (fWrite = false) and verw (fWrite = true).
4472 */
4473IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4474{
4475 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4476
4477 /** @todo figure whether the accessed bit is set or not. */
4478
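    /* VERR/VERW do not fault on a bad selector; they only report the outcome
       in ZF: set when the segment is readable (verr) / writable (verw) at the
       current CPL and the selector's RPL, clear otherwise. */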
4479 bool fAccessible = true;
4480 IEMSELDESC Desc;
4481 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4482 if (rcStrict == VINF_SUCCESS)
4483 {
4484 /* Check the descriptor, order doesn't matter much here. */
4485 if ( !Desc.Legacy.Gen.u1DescType
4486 || !Desc.Legacy.Gen.u1Present)
4487 fAccessible = false;
4488 else
4489 {
4490 if ( fWrite
4491 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4492 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4493 fAccessible = false;
4494
4495 /** @todo testcase for the conforming behavior. */
4496 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4497 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4498 {
4499 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4500 fAccessible = false;
4501 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4502 fAccessible = false;
4503 }
4504 }
4505
4506 }
4507 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4508 fAccessible = false;
4509 else
4510 return rcStrict;
4511
4512 /* commit */
4513 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fAccessible;
4514
4515 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4516 return VINF_SUCCESS;
4517}
4518
4519
4520/**
4521 * Implements LAR and LSL with 64-bit operand size.
4522 *
4523 * @returns VINF_SUCCESS.
4524 * @param pu64Dst Pointer to the destination register.
4525 * @param uSel The selector to load details for.
4526 * @param fIsLar true = LAR, false = LSL.
4527 */
4528IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4529{
4530 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4531
4532 /** @todo figure whether the accessed bit is set or not. */
4533
4534 bool fDescOk = true;
4535 IEMSELDESC Desc;
4536 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4537 if (rcStrict == VINF_SUCCESS)
4538 {
4539 /*
4540 * Check the descriptor type.
4541 */
4542 if (!Desc.Legacy.Gen.u1DescType)
4543 {
4544 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4545 {
4546 if (Desc.Long.Gen.u5Zeros)
4547 fDescOk = false;
4548 else
4549 switch (Desc.Long.Gen.u4Type)
4550 {
4551 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4552 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4553 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4554 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4555 break;
4556 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4557 fDescOk = fIsLar;
4558 break;
4559 default:
4560 fDescOk = false;
4561 break;
4562 }
4563 }
4564 else
4565 {
4566 switch (Desc.Long.Gen.u4Type)
4567 {
4568 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4569 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4570 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4571 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4572 case X86_SEL_TYPE_SYS_LDT:
4573 break;
4574 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4575 case X86_SEL_TYPE_SYS_TASK_GATE:
4576 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4577 fDescOk = fIsLar;
4578 break;
4579 default:
4580 fDescOk = false;
4581 break;
4582 }
4583 }
4584 }
4585 if (fDescOk)
4586 {
4587 /*
4588 * Check the RPL/DPL/CPL interaction..
4589 */
4590 /** @todo testcase for the conforming behavior. */
4591 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4592 || !Desc.Legacy.Gen.u1DescType)
4593 {
4594 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4595 fDescOk = false;
4596 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4597 fDescOk = false;
4598 }
4599 }
4600
4601 if (fDescOk)
4602 {
4603 /*
4604 * All fine, start committing the result.
4605 */
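            /* LAR returns the access-rights bytes (the descriptor's second
               dword with the limit bits masked off), LSL the limit scaled
               according to the granularity bit. */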
4606 if (fIsLar)
4607 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4608 else
4609 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4610 }
4611
4612 }
4613 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4614 fDescOk = false;
4615 else
4616 return rcStrict;
4617
4618 /* commit flags value and advance rip. */
4619 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fDescOk;
4620 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4621
4622 return VINF_SUCCESS;
4623}
4624
4625
4626/**
4627 * Implements LAR and LSL with 16-bit operand size.
4628 *
4629 * @returns VINF_SUCCESS.
4630 * @param pu16Dst Pointer to the destination register.
4631 * @param uSel The selector to load details for.
4632 * @param fIsLar true = LAR, false = LSL.
4633 */
4634IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
4635{
4636 uint64_t u64TmpDst = *pu16Dst;
4637 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
4638 *pu16Dst = u64TmpDst;
4639 return VINF_SUCCESS;
4640}
4641
4642
4643/**
4644 * Implements lgdt.
4645 *
4646 * @param iEffSeg The segment of the new gdtr contents
4647 * @param GCPtrEffSrc The address of the new gdtr contents.
4648 * @param enmEffOpSize The effective operand size.
4649 */
4650IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4651{
4652 if (pVCpu->iem.s.uCpl != 0)
4653 return iemRaiseGeneralProtectionFault0(pVCpu);
4654 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4655
4656 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
4657 {
4658 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
4659 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4660 }
4661
4662 /*
4663 * Fetch the limit and base address.
4664 */
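    /* The memory operand is a pseudo-descriptor: a 16-bit limit followed by
       the base address (architecturally 24 bits wide for a 16-bit operand
       size, 32 bits otherwise, 64 bits in long mode); iemMemFetchDataXdtr
       handles the operand-size differences. */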
4665 uint16_t cbLimit;
4666 RTGCPTR GCPtrBase;
4667 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4668 if (rcStrict == VINF_SUCCESS)
4669 {
4670 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4671 || X86_IS_CANONICAL(GCPtrBase))
4672 {
4673 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4674 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4675 else
4676 {
4677 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4678 pCtx->gdtr.cbGdt = cbLimit;
4679 pCtx->gdtr.pGdt = GCPtrBase;
4680 }
4681 if (rcStrict == VINF_SUCCESS)
4682 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4683 }
4684 else
4685 {
4686 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4687 return iemRaiseGeneralProtectionFault0(pVCpu);
4688 }
4689 }
4690 return rcStrict;
4691}
4692
4693
4694/**
4695 * Implements sgdt.
4696 *
4697 * @param iEffSeg The segment where to store the gdtr content.
4698 * @param GCPtrEffDst The address where to store the gdtr content.
4699 */
4700IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4701{
4702 /*
4703 * Join paths with sidt.
4704 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4705 * you really must know.
4706 */
4707 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4708 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4709 if (rcStrict == VINF_SUCCESS)
4710 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4711 return rcStrict;
4712}
4713
4714
4715/**
4716 * Implements lidt.
4717 *
4718 * @param iEffSeg The segment of the new idtr contents
4719 * @param GCPtrEffSrc The address of the new idtr contents.
4720 * @param enmEffOpSize The effective operand size.
4721 */
4722IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4723{
4724 if (pVCpu->iem.s.uCpl != 0)
4725 return iemRaiseGeneralProtectionFault0(pVCpu);
4726 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4727
4728 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
4729 {
4730 Log(("lidt: Guest intercept -> #VMEXIT\n"));
4731 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4732 }
4733
4734 /*
4735 * Fetch the limit and base address.
4736 */
4737 uint16_t cbLimit;
4738 RTGCPTR GCPtrBase;
4739 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4740 if (rcStrict == VINF_SUCCESS)
4741 {
4742 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4743 || X86_IS_CANONICAL(GCPtrBase))
4744 {
4745 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4746 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4747 else
4748 {
4749 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4750 pCtx->idtr.cbIdt = cbLimit;
4751 pCtx->idtr.pIdt = GCPtrBase;
4752 }
4753 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4754 }
4755 else
4756 {
4757 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4758 return iemRaiseGeneralProtectionFault0(pVCpu);
4759 }
4760 }
4761 return rcStrict;
4762}
4763
4764
4765/**
4766 * Implements sidt.
4767 *
4768 * @param iEffSeg The segment where to store the idtr content.
4769 * @param GCPtrEffDst The address where to store the idtr content.
4770 */
4771IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4772{
4773 /*
4774 * Join paths with sgdt.
4775 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4776 * you really must know.
4777 */
4778 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4779 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4780 if (rcStrict == VINF_SUCCESS)
4781 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4782 return rcStrict;
4783}
4784
4785
4786/**
4787 * Implements lldt.
4788 *
4789 * @param uNewLdt The new LDT selector value.
4790 */
4791IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4792{
4793 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4794
4795 /*
4796 * Check preconditions.
4797 */
4798 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4799 {
4800 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4801 return iemRaiseUndefinedOpcode(pVCpu);
4802 }
4803 if (pVCpu->iem.s.uCpl != 0)
4804 {
4805 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
4806 return iemRaiseGeneralProtectionFault0(pVCpu);
4807 }
4808 if (uNewLdt & X86_SEL_LDT)
4809 {
4810 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4811 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
4812 }
4813
4814 /*
4815 * Now, loading a NULL selector is easy.
4816 */
4817 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4818 {
4819 /* Nested-guest SVM intercept. */
4820 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4821 {
4822 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4823 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4824 }
4825
4826 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4827 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4828 CPUMSetGuestLDTR(pVCpu, uNewLdt);
4829 else
4830 pCtx->ldtr.Sel = uNewLdt;
4831 pCtx->ldtr.ValidSel = uNewLdt;
4832 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4833 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4834 {
4835 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4836        pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4837 }
4838 else if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4839 {
4840 /* AMD-V seems to leave the base and limit alone. */
4841 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4842 }
4843 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4844 {
4845 /* VT-x (Intel 3960x) seems to be doing the following. */
4846 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4847 pCtx->ldtr.u64Base = 0;
4848 pCtx->ldtr.u32Limit = UINT32_MAX;
4849 }
4850
4851 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4852 return VINF_SUCCESS;
4853 }
4854
4855 /*
4856 * Read the descriptor.
4857 */
4858 IEMSELDESC Desc;
4859 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4860 if (rcStrict != VINF_SUCCESS)
4861 return rcStrict;
4862
4863 /* Check GPs first. */
4864 if (Desc.Legacy.Gen.u1DescType)
4865 {
4866 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4867 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4868 }
4869 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4870 {
4871 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4872 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4873 }
4874 uint64_t u64Base;
4875 if (!IEM_IS_LONG_MODE(pVCpu))
4876 u64Base = X86DESC_BASE(&Desc.Legacy);
4877 else
4878 {
4879 if (Desc.Long.Gen.u5Zeros)
4880 {
4881 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4882 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4883 }
4884
4885 u64Base = X86DESC64_BASE(&Desc.Long);
4886 if (!IEM_IS_CANONICAL(u64Base))
4887 {
4888 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4889 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4890 }
4891 }
4892
4893 /* NP */
4894 if (!Desc.Legacy.Gen.u1Present)
4895 {
4896 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4897 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
4898 }
4899
4900 /* Nested-guest SVM intercept. */
4901 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4902 {
4903 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4904 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4905 }
4906
4907 /*
4908 * It checks out alright, update the registers.
4909 */
4910/** @todo check if the actual value is loaded or if the RPL is dropped */
4911 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4912 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4913 else
4914 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4915 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4916 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4917 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4918 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4919 pCtx->ldtr.u64Base = u64Base;
4920
4921 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4922 return VINF_SUCCESS;
4923}
4924
4925
4926/**
4927 * Implements ltr.
4928 *
4929 * @param uNewTr The new task register (TSS) selector value.
4930 */
4931IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4932{
4933 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4934
4935 /*
4936 * Check preconditions.
4937 */
4938 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4939 {
4940 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
4941 return iemRaiseUndefinedOpcode(pVCpu);
4942 }
4943 if (pVCpu->iem.s.uCpl != 0)
4944 {
4945 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
4946 return iemRaiseGeneralProtectionFault0(pVCpu);
4947 }
4948 if (uNewTr & X86_SEL_LDT)
4949 {
4950 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4951 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
4952 }
4953 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4954 {
4955 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4956 return iemRaiseGeneralProtectionFault0(pVCpu);
4957 }
4958 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
4959 {
4960 Log(("ltr: Guest intercept -> #VMEXIT\n"));
4961 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4962 }
4963
4964 /*
4965 * Read the descriptor.
4966 */
4967 IEMSELDESC Desc;
4968 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4969 if (rcStrict != VINF_SUCCESS)
4970 return rcStrict;
4971
4972 /* Check GPs first. */
4973 if (Desc.Legacy.Gen.u1DescType)
4974 {
4975 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4976 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4977 }
4978 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4979 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4980 || IEM_IS_LONG_MODE(pVCpu)) )
4981 {
4982 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4983 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4984 }
4985 uint64_t u64Base;
4986 if (!IEM_IS_LONG_MODE(pVCpu))
4987 u64Base = X86DESC_BASE(&Desc.Legacy);
4988 else
4989 {
4990 if (Desc.Long.Gen.u5Zeros)
4991 {
4992 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4993 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4994 }
4995
4996 u64Base = X86DESC64_BASE(&Desc.Long);
4997 if (!IEM_IS_CANONICAL(u64Base))
4998 {
4999 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5000 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5001 }
5002 }
5003
5004 /* NP */
5005 if (!Desc.Legacy.Gen.u1Present)
5006 {
5007 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5008 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5009 }
5010
5011 /*
5012 * Set it busy.
5013 * Note! Intel says this should lock down the whole descriptor, but we'll
5014 * restrict ourselves to 32-bit for now due to lack of inline
5015 * assembly and such.
5016 */
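    /* The busy flag is bit 1 of the type field, i.e. bit 41 of the descriptor
       (byte offset 5, bit 1).  The switch below rebases the mapped pointer to
       a 4-byte aligned address so the atomic bit-set hits an aligned dword. */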
5017 void *pvDesc;
5018 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
5021 switch ((uintptr_t)pvDesc & 3)
5022 {
5023 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5024 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5025 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5026 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5027 }
5028 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5029 if (rcStrict != VINF_SUCCESS)
5030 return rcStrict;
5031 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5032
5033 /*
5034 * It checks out alright, update the registers.
5035 */
5036/** @todo check if the actual value is loaded or if the RPL is dropped */
5037 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5038 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5039 else
5040 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
5041 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5042 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
5043 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5044 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5045 pCtx->tr.u64Base = u64Base;
5046
5047 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5048 return VINF_SUCCESS;
5049}
5050
5051
5052/**
5053 * Implements mov GReg,CRx.
5054 *
5055 * @param iGReg The general register to store the CRx value in.
5056 * @param iCrReg The CRx register to read (valid).
5057 */
5058IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5059{
5060 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5061 if (pVCpu->iem.s.uCpl != 0)
5062 return iemRaiseGeneralProtectionFault0(pVCpu);
5063 Assert(!pCtx->eflags.Bits.u1VM);
5064
5065 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5066 {
5067 Log(("iemCImpl_mov_Rd_Cd%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5068 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5069 }
5070
5071 /* read it */
5072 uint64_t crX;
5073 switch (iCrReg)
5074 {
5075 case 0:
5076 crX = pCtx->cr0;
5077 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5078 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5079 break;
5080 case 2: crX = pCtx->cr2; break;
5081 case 3: crX = pCtx->cr3; break;
5082 case 4: crX = pCtx->cr4; break;
5083 case 8:
5084 {
5085#ifdef VBOX_WITH_NESTED_HWVIRT
5086 if (pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIntrMasking)
5087 {
5088 crX = pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VTPR;
5089 break;
5090 }
5091#endif
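            /* CR8 mirrors TPR[7:4] of the local APIC task-priority register, hence the shift. */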
5092 uint8_t uTpr;
5093 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5094 if (RT_SUCCESS(rc))
5095 crX = uTpr >> 4;
5096 else
5097 crX = 0;
5098 break;
5099 }
5100 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5101 }
5102
5103 /* store it */
5104 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5105 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5106 else
5107 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5108
5109 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5110 return VINF_SUCCESS;
5111}
5112
5113
5114/**
5115 * Used to implement 'mov CRx,GReg', 'lmsw r/m16' and 'clts'.
5116 *
5117 * @param iCrReg The CRx register to write (valid).
5118 * @param uNewCrX The new value.
5119 * @param enmAccessCrx The instruction that caused the CrX load.
5120 * @param iGReg The general register in case of a 'mov CRx,GReg'
5121 * instruction.
5122 */
5123IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5124{
5125 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5126 VBOXSTRICTRC rcStrict;
5127 int rc;
5128#ifndef VBOX_WITH_NESTED_HWVIRT
5129 RT_NOREF2(iGReg, enmAccessCrX);
5130#endif
5131
5132 /*
5133     * Try to store it.
5134 * Unfortunately, CPUM only does a tiny bit of the work.
5135 */
5136 switch (iCrReg)
5137 {
5138 case 0:
5139 {
5140 /*
5141 * Perform checks.
5142 */
5143 uint64_t const uOldCrX = pCtx->cr0;
5144 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
5145 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
5146 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
5147
5148 /* ET is hardcoded on 486 and later. */
5149 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5150 uNewCrX |= X86_CR0_ET;
5151 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5152 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5153 {
5154 uNewCrX &= fValid;
5155 uNewCrX |= X86_CR0_ET;
5156 }
5157 else
5158 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5159
5160 /* Check for reserved bits. */
5161 if (uNewCrX & ~(uint64_t)fValid)
5162 {
5163 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5164 return iemRaiseGeneralProtectionFault0(pVCpu);
5165 }
5166
5167 /* Check for invalid combinations. */
5168 if ( (uNewCrX & X86_CR0_PG)
5169 && !(uNewCrX & X86_CR0_PE) )
5170 {
5171 Log(("Trying to set CR0.PG without CR0.PE\n"));
5172 return iemRaiseGeneralProtectionFault0(pVCpu);
5173 }
5174
5175 if ( !(uNewCrX & X86_CR0_CD)
5176 && (uNewCrX & X86_CR0_NW) )
5177 {
5178 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5179 return iemRaiseGeneralProtectionFault0(pVCpu);
5180 }
5181
5182 /* Long mode consistency checks. */
5183 if ( (uNewCrX & X86_CR0_PG)
5184 && !(uOldCrX & X86_CR0_PG)
5185 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5186 {
5187 if (!(pCtx->cr4 & X86_CR4_PAE))
5188 {
5189                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5190 return iemRaiseGeneralProtectionFault0(pVCpu);
5191 }
5192 if (pCtx->cs.Attr.n.u1Long)
5193 {
5194                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5195 return iemRaiseGeneralProtectionFault0(pVCpu);
5196 }
5197 }
5198
5199 /** @todo check reserved PDPTR bits as AMD states. */
5200
5201 /*
5202 * SVM nested-guest CR0 write intercepts.
5203 */
5204 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5205 {
5206 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5207 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5208 }
5209 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES))
5210 {
5211 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5212 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5213 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5214 {
5215 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5216 Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg));
5217 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5218 }
5219 }
5220
5221 /*
5222 * Change CR0.
5223 */
5224 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5225 CPUMSetGuestCR0(pVCpu, uNewCrX);
5226 else
5227 pCtx->cr0 = uNewCrX;
5228 Assert(pCtx->cr0 == uNewCrX);
5229
5230 /*
5231 * Change EFER.LMA if entering or leaving long mode.
5232 */
5233 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5234 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5235 {
5236 uint64_t NewEFER = pCtx->msrEFER;
5237 if (uNewCrX & X86_CR0_PG)
5238 NewEFER |= MSR_K6_EFER_LMA;
5239 else
5240 NewEFER &= ~MSR_K6_EFER_LMA;
5241
5242 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5243 CPUMSetGuestEFER(pVCpu, NewEFER);
5244 else
5245 pCtx->msrEFER = NewEFER;
5246 Assert(pCtx->msrEFER == NewEFER);
5247 }
5248
5249 /*
5250 * Inform PGM.
5251 */
5252 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5253 {
5254 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
5255 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
5256 {
5257 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5258 AssertRCReturn(rc, rc);
5259 /* ignore informational status codes */
5260 }
5261 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5262 }
5263 else
5264 rcStrict = VINF_SUCCESS;
5265
5266#ifdef IN_RC
5267 /* Return to ring-3 for rescheduling if WP or AM changes. */
5268 if ( rcStrict == VINF_SUCCESS
5269 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
5270 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
5271 rcStrict = VINF_EM_RESCHEDULE;
5272#endif
5273 break;
5274 }
5275
5276 /*
5277 * CR2 can be changed without any restrictions.
5278 */
5279 case 2:
5280 {
5281 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5282 {
5283 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5284 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5285 }
5286 pCtx->cr2 = uNewCrX;
5287 rcStrict = VINF_SUCCESS;
5288 break;
5289 }
5290
5291 /*
5292 * CR3 is relatively simple, although AMD and Intel have different
5293         * accounts of how setting reserved bits is handled. We take Intel's
5294 * word for the lower bits and AMD's for the high bits (63:52). The
5295 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5296 * on this.
5297 */
5298 /** @todo Testcase: Setting reserved bits in CR3, especially before
5299 * enabling paging. */
5300 case 3:
5301 {
5302 /* check / mask the value. */
5303 if (uNewCrX & UINT64_C(0xfff0000000000000))
5304 {
5305 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5306 return iemRaiseGeneralProtectionFault0(pVCpu);
5307 }
5308
5309 uint64_t fValid;
5310 if ( (pCtx->cr4 & X86_CR4_PAE)
5311 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5312 fValid = UINT64_C(0x000fffffffffffff);
5313 else
5314 fValid = UINT64_C(0xffffffff);
5315 if (uNewCrX & ~fValid)
5316 {
5317 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5318 uNewCrX, uNewCrX & ~fValid));
5319 uNewCrX &= fValid;
5320 }
5321
5322 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
5323 {
5324 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5325 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
5326 }
5327
5328 /** @todo If we're in PAE mode we should check the PDPTRs for
5329 * invalid bits. */
5330
5331 /* Make the change. */
5332 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5333 {
5334 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5335 AssertRCSuccessReturn(rc, rc);
5336 }
5337 else
5338 pCtx->cr3 = uNewCrX;
5339
5340 /* Inform PGM. */
5341 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5342 {
5343 if (pCtx->cr0 & X86_CR0_PG)
5344 {
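                /* A MOV to CR3 flushes non-global TLB entries; flush global entries
                   as well only when CR4.PGE is clear. */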
5345 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5346 AssertRCReturn(rc, rc);
5347 /* ignore informational status codes */
5348 }
5349 }
5350 rcStrict = VINF_SUCCESS;
5351 break;
5352 }
5353
5354 /*
5355 * CR4 is a bit more tedious as there are bits which cannot be cleared
5356 * under some circumstances and such.
5357 */
5358 case 4:
5359 {
5360 uint64_t const uOldCrX = pCtx->cr4;
5361
5362 /** @todo Shouldn't this look at the guest CPUID bits to determine
5363 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5364 * should #GP(0). */
5365 /* reserved bits */
5366 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5367 | X86_CR4_TSD | X86_CR4_DE
5368 | X86_CR4_PSE | X86_CR4_PAE
5369 | X86_CR4_MCE | X86_CR4_PGE
5370 | X86_CR4_PCE | X86_CR4_OSFXSR
5371 | X86_CR4_OSXMMEEXCPT;
5372 //if (xxx)
5373 // fValid |= X86_CR4_VMXE;
5374 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
5375 fValid |= X86_CR4_OSXSAVE;
5376 if (uNewCrX & ~(uint64_t)fValid)
5377 {
5378 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5379 return iemRaiseGeneralProtectionFault0(pVCpu);
5380 }
5381
5382 /* long mode checks. */
5383 if ( (uOldCrX & X86_CR4_PAE)
5384 && !(uNewCrX & X86_CR4_PAE)
5385 && CPUMIsGuestInLongModeEx(pCtx) )
5386 {
5387                Log(("Trying to clear CR4.PAE while long mode is active\n"));
5388 return iemRaiseGeneralProtectionFault0(pVCpu);
5389 }
5390
5391 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
5392 {
5393 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5394 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
5395 }
5396
5397 /*
5398 * Change it.
5399 */
5400 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5401 {
5402 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5403 AssertRCSuccessReturn(rc, rc);
5404 }
5405 else
5406 pCtx->cr4 = uNewCrX;
5407 Assert(pCtx->cr4 == uNewCrX);
5408
5409 /*
5410 * Notify SELM and PGM.
5411 */
5412 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5413 {
5414                /* SELM - VME may change things wrt the TSS shadowing. */
5415 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5416 {
5417 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5418 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5419#ifdef VBOX_WITH_RAW_MODE
5420 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
5421 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5422#endif
5423 }
5424
5425 /* PGM - flushing and mode. */
5426 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5427 {
5428 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5429 AssertRCReturn(rc, rc);
5430 /* ignore informational status codes */
5431 }
5432 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5433 }
5434 else
5435 rcStrict = VINF_SUCCESS;
5436 break;
5437 }
5438
5439 /*
5440 * CR8 maps to the APIC TPR.
5441 */
5442 case 8:
5443 {
5444 if (uNewCrX & ~(uint64_t)0xf)
5445 {
5446 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5447 return iemRaiseGeneralProtectionFault0(pVCpu);
5448 }
5449
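            /* CR8[3:0] maps to TPR[7:4], so shift the new value into the upper nibble. */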
5450 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
5451#ifdef VBOX_WITH_NESTED_HWVIRT
5452 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5453 {
5454 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
5455 {
5456 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5457 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
5458 }
5459
5460 pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VTPR = u8Tpr;
5461 if (pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIntrMasking)
5462 {
5463 rcStrict = VINF_SUCCESS;
5464 break;
5465 }
5466 }
5467#endif
5468 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5469 APICSetTpr(pVCpu, u8Tpr);
5470 rcStrict = VINF_SUCCESS;
5471 break;
5472 }
5473
5474 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5475 }
5476
5477 /*
5478 * Advance the RIP on success.
5479 */
5480 if (RT_SUCCESS(rcStrict))
5481 {
5482 if (rcStrict != VINF_SUCCESS)
5483 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5484 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5485 }
5486
5487 return rcStrict;
5488}
5489
5490
5491/**
5492 * Implements mov CRx,GReg.
5493 *
5494 * @param iCrReg The CRx register to write (valid).
5495 * @param iGReg The general register to load the CRx value from.
5496 */
5497IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5498{
5499 if (pVCpu->iem.s.uCpl != 0)
5500 return iemRaiseGeneralProtectionFault0(pVCpu);
5501 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5502
5503 /*
5504 * Read the new value from the source register and call common worker.
5505 */
5506 uint64_t uNewCrX;
5507 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5508 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
5509 else
5510 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
5511 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
5512}
5513
5514
5515/**
5516 * Implements 'LMSW r/m16'
5517 *
5518 * @param u16NewMsw The new value.
5519 */
5520IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5521{
5522 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5523
5524 if (pVCpu->iem.s.uCpl != 0)
5525 return iemRaiseGeneralProtectionFault0(pVCpu);
5526 Assert(!pCtx->eflags.Bits.u1VM);
5527
5528 /*
5529 * Compose the new CR0 value and call common worker.
5530 */
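    /* Only PE, MP, EM and TS are affected; since the old PE is kept and merely ORed
       with the new bits, LMSW can set CR0.PE but never clear it. */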
5531 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5532 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5533 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
5534}
5535
5536
5537/**
5538 * Implements 'CLTS'.
5539 */
5540IEM_CIMPL_DEF_0(iemCImpl_clts)
5541{
5542 if (pVCpu->iem.s.uCpl != 0)
5543 return iemRaiseGeneralProtectionFault0(pVCpu);
5544
5545 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5546 uint64_t uNewCr0 = pCtx->cr0;
5547 uNewCr0 &= ~X86_CR0_TS;
5548 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
5549}
5550
5551
5552/**
5553 * Implements mov GReg,DRx.
5554 *
5555 * @param iGReg The general register to store the DRx value in.
5556 * @param iDrReg The DRx register to read (0-7).
5557 */
5558IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5559{
5560 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5561
5562 /*
5563 * Check preconditions.
5564 */
5565
5566 /* Raise GPs. */
5567 if (pVCpu->iem.s.uCpl != 0)
5568 return iemRaiseGeneralProtectionFault0(pVCpu);
5569 Assert(!pCtx->eflags.Bits.u1VM);
5570
5571 if ( (iDrReg == 4 || iDrReg == 5)
5572 && (pCtx->cr4 & X86_CR4_DE) )
5573 {
5574 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5575 return iemRaiseGeneralProtectionFault0(pVCpu);
5576 }
5577
5578 /* Raise #DB if general access detect is enabled. */
5579 if (pCtx->dr[7] & X86_DR7_GD)
5580 {
5581 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5582 return iemRaiseDebugException(pVCpu);
5583 }
5584
5585 /*
5586 * Read the debug register and store it in the specified general register.
5587 */
5588 uint64_t drX;
5589 switch (iDrReg)
5590 {
5591 case 0: drX = pCtx->dr[0]; break;
5592 case 1: drX = pCtx->dr[1]; break;
5593 case 2: drX = pCtx->dr[2]; break;
5594 case 3: drX = pCtx->dr[3]; break;
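        /* DR4/DR5 alias DR6/DR7 when CR4.DE is clear (the CR4.DE=1 case was
           rejected with #GP above), so the pairs share the same code. */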
5595 case 6:
5596 case 4:
5597 drX = pCtx->dr[6];
5598 drX |= X86_DR6_RA1_MASK;
5599 drX &= ~X86_DR6_RAZ_MASK;
5600 break;
5601 case 7:
5602 case 5:
5603 drX = pCtx->dr[7];
5604            drX |= X86_DR7_RA1_MASK;
5605 drX &= ~X86_DR7_RAZ_MASK;
5606 break;
5607 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5608 }
5609
5610 /** @todo SVM nested-guest intercept for DR8-DR15? */
5611 /*
5612 * Check for any SVM nested-guest intercepts for the DRx read.
5613 */
5614 if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
5615 {
5616 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
5617 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
5618 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5619 }
5620
5621 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5622 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
5623 else
5624 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
5625
5626 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5627 return VINF_SUCCESS;
5628}
5629
5630
5631/**
5632 * Implements mov DRx,GReg.
5633 *
5634 * @param iDrReg The DRx register to write (valid).
5635 * @param iGReg The general register to load the DRx value from.
5636 */
5637IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5638{
5639 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5640
5641 /*
5642 * Check preconditions.
5643 */
5644 if (pVCpu->iem.s.uCpl != 0)
5645 return iemRaiseGeneralProtectionFault0(pVCpu);
5646 Assert(!pCtx->eflags.Bits.u1VM);
5647
5648 if (iDrReg == 4 || iDrReg == 5)
5649 {
5650 if (pCtx->cr4 & X86_CR4_DE)
5651 {
5652 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5653 return iemRaiseGeneralProtectionFault0(pVCpu);
5654 }
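        /* CR4.DE is clear, so DR4/DR5 alias DR6/DR7; remap the register index. */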
5655 iDrReg += 2;
5656 }
5657
5658 /* Raise #DB if general access detect is enabled. */
5659 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
5660 * \#GP? */
5661 if (pCtx->dr[7] & X86_DR7_GD)
5662 {
5663 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5664 return iemRaiseDebugException(pVCpu);
5665 }
5666
5667 /*
5668 * Read the new value from the source register.
5669 */
5670 uint64_t uNewDrX;
5671 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5672 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
5673 else
5674 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
5675
5676 /*
5677 * Adjust it.
5678 */
5679 switch (iDrReg)
5680 {
5681 case 0:
5682 case 1:
5683 case 2:
5684 case 3:
5685 /* nothing to adjust */
5686 break;
5687
5688 case 6:
5689 if (uNewDrX & X86_DR6_MBZ_MASK)
5690 {
5691 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5692 return iemRaiseGeneralProtectionFault0(pVCpu);
5693 }
5694 uNewDrX |= X86_DR6_RA1_MASK;
5695 uNewDrX &= ~X86_DR6_RAZ_MASK;
5696 break;
5697
5698 case 7:
5699 if (uNewDrX & X86_DR7_MBZ_MASK)
5700 {
5701 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5702 return iemRaiseGeneralProtectionFault0(pVCpu);
5703 }
5704 uNewDrX |= X86_DR7_RA1_MASK;
5705 uNewDrX &= ~X86_DR7_RAZ_MASK;
5706 break;
5707
5708 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5709 }
5710
5711 /** @todo SVM nested-guest intercept for DR8-DR15? */
5712 /*
5713 * Check for any SVM nested-guest intercepts for the DRx write.
5714 */
5715 if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
5716 {
5717 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
5718 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
5719 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5720 }
5721
5722 /*
5723 * Do the actual setting.
5724 */
5725 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5726 {
5727 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
5728 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5729 }
5730 else
5731 pCtx->dr[iDrReg] = uNewDrX;
5732
5733 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5734 return VINF_SUCCESS;
5735}
5736
5737
5738/**
5739 * Implements 'INVLPG m'.
5740 *
5741 * @param GCPtrPage The effective address of the page to invalidate.
5742 * @remarks Updates the RIP.
5743 */
5744IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5745{
5746 /* ring-0 only. */
5747 if (pVCpu->iem.s.uCpl != 0)
5748 return iemRaiseGeneralProtectionFault0(pVCpu);
5749 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5750
5751 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
5752 {
5753 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
5754 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
5755 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
5756 }
5757
5758 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
5759 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5760
5761 if (rc == VINF_SUCCESS)
5762 return VINF_SUCCESS;
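    /* VINF_PGM_SYNC_CR3 is an informational status; pass it up so the caller can
       perform the requested CR3 sync before resuming the guest. */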
5763 if (rc == VINF_PGM_SYNC_CR3)
5764 return iemSetPassUpStatus(pVCpu, rc);
5765
5766 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5767 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5768 return rc;
5769}
5770
5771
5772/**
5773 * Implements RDTSC.
5774 */
5775IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5776{
5777 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5778
5779 /*
5780 * Check preconditions.
5781 */
5782 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
5783 return iemRaiseUndefinedOpcode(pVCpu);
5784
5785 if ( (pCtx->cr4 & X86_CR4_TSD)
5786 && pVCpu->iem.s.uCpl != 0)
5787 {
5788 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5789 return iemRaiseGeneralProtectionFault0(pVCpu);
5790 }
5791
5792 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
5793 {
5794 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
5795 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5796 }
5797
5798 /*
5799 * Do the job.
5800 */
5801 uint64_t uTicks = TMCpuTickGet(pVCpu);
5802 pCtx->rax = RT_LO_U32(uTicks);
5803 pCtx->rdx = RT_HI_U32(uTicks);
5804#ifdef IEM_VERIFICATION_MODE_FULL
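    /* The reference run will read a different TSC value, so have the verifier
       ignore RAX/RDX differences for this instruction. */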
5805 pVCpu->iem.s.fIgnoreRaxRdx = true;
5806#endif
5807
5808 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5809 return VINF_SUCCESS;
5810}
5811
5812
5813/**
5814 * Implements RDTSCP.
5815 */
5816IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
5817{
5818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5819
5820 /*
5821 * Check preconditions.
5822 */
5823 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
5824 return iemRaiseUndefinedOpcode(pVCpu);
5825
5826 if ( (pCtx->cr4 & X86_CR4_TSD)
5827 && pVCpu->iem.s.uCpl != 0)
5828 {
5829 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5830 return iemRaiseGeneralProtectionFault0(pVCpu);
5831 }
5832
5833 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
5834 {
5835 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
5836 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5837 }
5838
5839 /*
5840 * Do the job.
5841 * Query the MSR first in case of trips to ring-3.
5842 */
5843 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
5844 if (rcStrict == VINF_SUCCESS)
5845 {
5846 /* Low dword of the TSC_AUX msr only. */
5847 pCtx->rcx &= UINT32_C(0xffffffff);
5848
5849 uint64_t uTicks = TMCpuTickGet(pVCpu);
5850 pCtx->rax = RT_LO_U32(uTicks);
5851 pCtx->rdx = RT_HI_U32(uTicks);
5852#ifdef IEM_VERIFICATION_MODE_FULL
5853 pVCpu->iem.s.fIgnoreRaxRdx = true;
5854#endif
5855 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5856 }
5857 return rcStrict;
5858}
5859
5860
5861/**
5862 * Implements RDPMC.
5863 */
5864IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
5865{
5866 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5867 if ( pVCpu->iem.s.uCpl != 0
5868 && !(pCtx->cr4 & X86_CR4_PCE))
5869 return iemRaiseGeneralProtectionFault0(pVCpu);
5870
5871 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
5872 {
5873 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
5874 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5875 }
5876
5877 /** @todo Implement RDPMC for the regular guest execution case (the above only
5878 * handles nested-guest intercepts). */
5879 RT_NOREF(cbInstr);
5880 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
5881}
5882
5883
5884/**
5885 * Implements RDMSR.
5886 */
5887IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5888{
5889 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5890
5891 /*
5892 * Check preconditions.
5893 */
5894 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
5895 return iemRaiseUndefinedOpcode(pVCpu);
5896 if (pVCpu->iem.s.uCpl != 0)
5897 return iemRaiseGeneralProtectionFault0(pVCpu);
5898
5899 /*
5900 * Do the job.
5901 */
5902 RTUINT64U uValue;
5903 VBOXSTRICTRC rcStrict;
5904 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
5905 {
5906 rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */);
5907 if (rcStrict == VINF_SVM_VMEXIT)
5908 return VINF_SUCCESS;
5909 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
5910 {
5911 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
5912 return rcStrict;
5913 }
5914 }
5915
5916 rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
5917 if (rcStrict == VINF_SUCCESS)
5918 {
5919 pCtx->rax = uValue.s.Lo;
5920 pCtx->rdx = uValue.s.Hi;
5921
5922 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5923 return VINF_SUCCESS;
5924 }
5925
5926#ifndef IN_RING3
5927 /* Deferred to ring-3. */
5928 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5929 {
5930 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5931 return rcStrict;
5932 }
5933#else /* IN_RING3 */
5934    /* Often an unimplemented MSR or MSR bit, so worth logging. */
5935 static uint32_t s_cTimes = 0;
5936 if (s_cTimes++ < 10)
5937 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5938 else
5939#endif
5940 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5941 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5942 return iemRaiseGeneralProtectionFault0(pVCpu);
5943}
5944
5945
5946/**
5947 * Implements WRMSR.
5948 */
5949IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5950{
5951 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5952
5953 /*
5954 * Check preconditions.
5955 */
5956 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
5957 return iemRaiseUndefinedOpcode(pVCpu);
5958 if (pVCpu->iem.s.uCpl != 0)
5959 return iemRaiseGeneralProtectionFault0(pVCpu);
5960
5961 /*
5962 * Do the job.
5963 */
5964 RTUINT64U uValue;
5965 uValue.s.Lo = pCtx->eax;
5966 uValue.s.Hi = pCtx->edx;
5967
5968 VBOXSTRICTRC rcStrict;
5969 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
5970 {
5971 rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */);
5972 if (rcStrict == VINF_SVM_VMEXIT)
5973 return VINF_SUCCESS;
5974 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
5975 {
5976            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
5977 return rcStrict;
5978 }
5979 }
5980
5981 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5982 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
5983 else
5984 {
5985#ifdef IN_RING3
5986 CPUMCTX CtxTmp = *pCtx;
5987 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
5988 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
5989 *pCtx = *pCtx2;
5990 *pCtx2 = CtxTmp;
5991#else
5992 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5993#endif
5994 }
5995 if (rcStrict == VINF_SUCCESS)
5996 {
5997 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5998 return VINF_SUCCESS;
5999 }
6000
6001#ifndef IN_RING3
6002 /* Deferred to ring-3. */
6003 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
6004 {
6005 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
6006 return rcStrict;
6007 }
6008#else /* IN_RING3 */
6009    /* Often an unimplemented MSR or MSR bit, so worth logging. */
6010 static uint32_t s_cTimes = 0;
6011 if (s_cTimes++ < 10)
6012 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
6013 else
6014#endif
6015 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
6016 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6017 return iemRaiseGeneralProtectionFault0(pVCpu);
6018}
6019
6020
6021/**
6022 * Implements 'IN eAX, port'.
6023 *
6024 * @param u16Port The source port.
6025 * @param cbReg The register size.
6026 */
6027IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
6028{
6029 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6030
6031 /*
6032 * CPL check
6033 */
6034 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6035 if (rcStrict != VINF_SUCCESS)
6036 return rcStrict;
6037
6038 /*
6039 * Check SVM nested-guest IO intercept.
6040 */
6041 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6042 {
6043 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, 0 /* N/A - cAddrSizeBits */,
6044 0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
6045 if (rcStrict == VINF_SVM_VMEXIT)
6046 return VINF_SUCCESS;
6047 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6048 {
6049 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6050 VBOXSTRICTRC_VAL(rcStrict)));
6051 return rcStrict;
6052 }
6053 }
6054
6055 /*
6056 * Perform the I/O.
6057 */
6058 uint32_t u32Value;
6059 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6060 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg);
6061 else
6062 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, cbReg);
6063 if (IOM_SUCCESS(rcStrict))
6064 {
6065 switch (cbReg)
6066 {
6067 case 1: pCtx->al = (uint8_t)u32Value; break;
6068 case 2: pCtx->ax = (uint16_t)u32Value; break;
6069 case 4: pCtx->rax = u32Value; break;
6070 default: AssertFailedReturn(VERR_IEM_IPE_3);
6071 }
6072 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6073 pVCpu->iem.s.cPotentialExits++;
6074 if (rcStrict != VINF_SUCCESS)
6075 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6076 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6077
6078 /*
6079 * Check for I/O breakpoints.
6080 */
6081 uint32_t const uDr7 = pCtx->dr[7];
6082 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6083 && X86_DR7_ANY_RW_IO(uDr7)
6084 && (pCtx->cr4 & X86_CR4_DE))
6085 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6086 {
6087 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6088 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6089 rcStrict = iemRaiseDebugException(pVCpu);
6090 }
6091 }
6092
6093 return rcStrict;
6094}
6095
6096
6097/**
6098 * Implements 'IN eAX, DX'.
6099 *
6100 * @param cbReg The register size.
6101 */
6102IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
6103{
6104 return IEM_CIMPL_CALL_2(iemCImpl_in, IEM_GET_CTX(pVCpu)->dx, cbReg);
6105}
6106
6107
6108/**
6109 * Implements 'OUT port, eAX'.
6110 *
6111 * @param u16Port The destination port.
6112 * @param cbReg The register size.
6113 */
6114IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
6115{
6116 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6117
6118 /*
6119 * CPL check
6120 */
6121 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6122 if (rcStrict != VINF_SUCCESS)
6123 return rcStrict;
6124
6125 /*
6126 * Check SVM nested-guest IO intercept.
6127 */
6128 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6129 {
6130 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, 0 /* N/A - cAddrSizeBits */,
6131 0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
6132 if (rcStrict == VINF_SVM_VMEXIT)
6133 return VINF_SUCCESS;
6134 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6135 {
6136 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6137 VBOXSTRICTRC_VAL(rcStrict)));
6138 return rcStrict;
6139 }
6140 }
6141
6142 /*
6143 * Perform the I/O.
6144 */
6145 uint32_t u32Value;
6146 switch (cbReg)
6147 {
6148 case 1: u32Value = pCtx->al; break;
6149 case 2: u32Value = pCtx->ax; break;
6150 case 4: u32Value = pCtx->eax; break;
6151 default: AssertFailedReturn(VERR_IEM_IPE_4);
6152 }
6153 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6154 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg);
6155 else
6156 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, u32Value, cbReg);
6157 if (IOM_SUCCESS(rcStrict))
6158 {
6159 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6160 pVCpu->iem.s.cPotentialExits++;
6161 if (rcStrict != VINF_SUCCESS)
6162 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6163 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6164
6165 /*
6166 * Check for I/O breakpoints.
6167 */
6168 uint32_t const uDr7 = pCtx->dr[7];
6169 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6170 && X86_DR7_ANY_RW_IO(uDr7)
6171 && (pCtx->cr4 & X86_CR4_DE))
6172 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6173 {
6174 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6175 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6176 rcStrict = iemRaiseDebugException(pVCpu);
6177 }
6178 }
6179 return rcStrict;
6180}
6181
6182
6183/**
6184 * Implements 'OUT DX, eAX'.
6185 *
6186 * @param cbReg The register size.
6187 */
6188IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
6189{
6190 return IEM_CIMPL_CALL_2(iemCImpl_out, IEM_GET_CTX(pVCpu)->dx, cbReg);
6191}
6192
6193
6194#ifdef VBOX_WITH_NESTED_HWVIRT
6195/**
6196 * Implements 'VMRUN'.
6197 */
6198IEM_CIMPL_DEF_0(iemCImpl_vmrun)
6199{
6200 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6201    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
6202
6203 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6204 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
6205 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
6206 {
6207 Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
6208 return iemRaiseGeneralProtectionFault0(pVCpu);
6209 }
6210
6211 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
6212 {
6213 Log(("vmrun: Guest intercept -> #VMEXIT\n"));
6214 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6215 }
6216
6217 VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, cbInstr, GCPhysVmcb);
6218 /* If VMRUN execution causes a #VMEXIT, we continue executing the instruction following the VMRUN. */
6219 if (rcStrict == VINF_SVM_VMEXIT)
6220 {
6221 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6222 rcStrict = VINF_SUCCESS;
6223 }
6224 else if (rcStrict == VERR_SVM_VMEXIT_FAILED)
6225 rcStrict = iemInitiateCpuShutdown(pVCpu);
6226 return rcStrict;
6227}
6228
6229
6230/**
6231 * Implements 'VMMCALL'.
6232 */
6233IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
6234{
6235 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6236 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
6237 {
6238 Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
6239 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6240 }
6241
6242 bool fUpdatedRipAndRF;
6243 VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
6244 if (RT_SUCCESS(rcStrict))
6245 {
6246 if (!fUpdatedRipAndRF)
6247 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6248 return rcStrict;
6249 }
6250
6251 return iemRaiseUndefinedOpcode(pVCpu);
6252}
6253
6254
6255/**
6256 * Implements 'VMLOAD'.
6257 */
6258IEM_CIMPL_DEF_0(iemCImpl_vmload)
6259{
6260 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6261 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
6262
6263 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6264 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
6265 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
6266 {
6267 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
6268 return iemRaiseGeneralProtectionFault0(pVCpu);
6269 }
6270
6271 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
6272 {
6273 Log(("vmload: Guest intercept -> #VMEXIT\n"));
6274 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6275 }
6276
6277 void *pvVmcb;
6278 PGMPAGEMAPLOCK PgLockVmcb;
6279 VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, &pvVmcb, &PgLockVmcb);
6280 if (rcStrict == VINF_SUCCESS)
6281 {
6282 PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
6283 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, FS, fs);
6284 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, GS, gs);
6285 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, TR, tr);
6286 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
6287
6288 pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
6289 pCtx->msrSTAR = pVmcb->guest.u64STAR;
6290 pCtx->msrLSTAR = pVmcb->guest.u64LSTAR;
6291 pCtx->msrCSTAR = pVmcb->guest.u64CSTAR;
6292 pCtx->msrSFMASK = pVmcb->guest.u64SFMASK;
6293
6294 pCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
6295 pCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
6296 pCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
6297
6298 iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
6299 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6300 }
6301 return rcStrict;
6302}
6303
6304
6305/**
6306 * Implements 'VMSAVE'.
6307 */
6308IEM_CIMPL_DEF_0(iemCImpl_vmsave)
6309{
6310 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6311 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
6312
6313 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6314 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
6315 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
6316 {
6317 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
6318 return iemRaiseGeneralProtectionFault0(pVCpu);
6319 }
6320
6321 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
6322 {
6323 Log(("vmsave: Guest intercept -> #VMEXIT\n"));
6324 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6325 }
6326
6327 void *pvVmcb;
6328 PGMPAGEMAPLOCK PgLockVmcb;
6329 VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
6330 if (rcStrict == VINF_SUCCESS)
6331 {
6332 PSVMVMCB pVmcb = (PSVMVMCB)pvVmcb;
6333 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
6334 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
6335 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
6336 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
6337
6338 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
6339 pVmcb->guest.u64STAR = pCtx->msrSTAR;
6340 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
6341 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
6342 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
6343
6344 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
6345 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
6346 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
6347
6348        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
6349 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6350 }
6351 return rcStrict;
6352}
6353
6354
6355/**
6356 * Implements 'CLGI'.
6357 */
6358IEM_CIMPL_DEF_0(iemCImpl_clgi)
6359{
6360 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6361 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
6362 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
6363 {
6364 Log(("clgi: Guest intercept -> #VMEXIT\n"));
6365 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6366 }
6367
6368 pCtx->hwvirt.svm.fGif = 0;
6369 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6370#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
6371 pVCpu->iem.s.fForceIemExec = true;
6372#endif
6373 return VINF_SUCCESS;
6374}
6375
6376
6377/**
6378 * Implements 'STGI'.
6379 */
6380IEM_CIMPL_DEF_0(iemCImpl_stgi)
6381{
6382 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6383 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
6384 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
6385 {
6386 Log2(("stgi: Guest intercept -> #VMEXIT\n"));
6387 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6388 }
6389
6390 pCtx->hwvirt.svm.fGif = 1;
6391 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6392#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
6393 pVCpu->iem.s.fForceIemExec = false;
6394#endif
6395 return VINF_SUCCESS;
6396}
6397
6398
6399/**
6400 * Implements 'INVLPGA'.
6401 */
6402IEM_CIMPL_DEF_0(iemCImpl_invlpga)
6403{
6404 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6405 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6406 /** @todo PGM needs virtual ASID support. */
6407#if 0
6408 uint32_t const uAsid = pCtx->ecx;
6409#endif
6410
6411 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
6412 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
6413 {
6414 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6415 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6416 }
6417
6418 PGMInvalidatePage(pVCpu, GCPtrPage);
6419 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6420 return VINF_SUCCESS;
6421}
6422
6423
6424/**
6425 * Implements 'SKINIT'.
6426 */
6427IEM_CIMPL_DEF_0(iemCImpl_skinit)
6428{
6429    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
6430
6431 uint32_t uIgnore;
6432 uint32_t fFeaturesECX;
6433 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
6434 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
6435 return iemRaiseUndefinedOpcode(pVCpu);
6436
6437 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
6438 {
6439 Log2(("skinit: Guest intercept -> #VMEXIT\n"));
6440 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6441 }
6442
6443 RT_NOREF(cbInstr);
6444 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
6445}
6446#endif /* VBOX_WITH_NESTED_HWVIRT */
6447
6448/**
6449 * Implements 'CLI'.
6450 */
6451IEM_CIMPL_DEF_0(iemCImpl_cli)
6452{
6453 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6454 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6455 uint32_t const fEflOld = fEfl;
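    /* Protected mode: CPL <= IOPL may clear IF; CPL 3 with CR4.PVI clears VIF instead;
       anything else #GPs.  V8086 mode: IOPL 3 clears IF; IOPL < 3 with CR4.VME clears
       VIF; otherwise #GP.  Real mode always clears IF. */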
6456 if (pCtx->cr0 & X86_CR0_PE)
6457 {
6458 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6459 if (!(fEfl & X86_EFL_VM))
6460 {
6461 if (pVCpu->iem.s.uCpl <= uIopl)
6462 fEfl &= ~X86_EFL_IF;
6463 else if ( pVCpu->iem.s.uCpl == 3
6464 && (pCtx->cr4 & X86_CR4_PVI) )
6465 fEfl &= ~X86_EFL_VIF;
6466 else
6467 return iemRaiseGeneralProtectionFault0(pVCpu);
6468 }
6469 /* V8086 */
6470 else if (uIopl == 3)
6471 fEfl &= ~X86_EFL_IF;
6472 else if ( uIopl < 3
6473 && (pCtx->cr4 & X86_CR4_VME) )
6474 fEfl &= ~X86_EFL_VIF;
6475 else
6476 return iemRaiseGeneralProtectionFault0(pVCpu);
6477 }
6478 /* real mode */
6479 else
6480 fEfl &= ~X86_EFL_IF;
6481
6482 /* Commit. */
6483 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6484 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6485 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
6486 return VINF_SUCCESS;
6487}
6488
6489
6490/**
6491 * Implements 'STI'.
6492 */
6493IEM_CIMPL_DEF_0(iemCImpl_sti)
6494{
6495 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6496 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6497 uint32_t const fEflOld = fEfl;
6498
6499 if (pCtx->cr0 & X86_CR0_PE)
6500 {
6501 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6502 if (!(fEfl & X86_EFL_VM))
6503 {
6504 if (pVCpu->iem.s.uCpl <= uIopl)
6505 fEfl |= X86_EFL_IF;
6506 else if ( pVCpu->iem.s.uCpl == 3
6507 && (pCtx->cr4 & X86_CR4_PVI)
6508 && !(fEfl & X86_EFL_VIP) )
6509 fEfl |= X86_EFL_VIF;
6510 else
6511 return iemRaiseGeneralProtectionFault0(pVCpu);
6512 }
6513 /* V8086 */
6514 else if (uIopl == 3)
6515 fEfl |= X86_EFL_IF;
6516 else if ( uIopl < 3
6517 && (pCtx->cr4 & X86_CR4_VME)
6518 && !(fEfl & X86_EFL_VIP) )
6519 fEfl |= X86_EFL_VIF;
6520 else
6521 return iemRaiseGeneralProtectionFault0(pVCpu);
6522 }
6523 /* real mode */
6524 else
6525 fEfl |= X86_EFL_IF;
6526
6527 /* Commit. */
6528 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6529 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
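    /* Setting IF with STI opens a one-instruction interrupt shadow; remember the RIP
       so pending interrupts are held off until the next instruction has completed. */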
6530 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
6531 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6532 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
6533 return VINF_SUCCESS;
6534}
6535
6536
6537/**
6538 * Implements 'HLT'.
6539 */
6540IEM_CIMPL_DEF_0(iemCImpl_hlt)
6541{
6542 if (pVCpu->iem.s.uCpl != 0)
6543 return iemRaiseGeneralProtectionFault0(pVCpu);
6544
6545 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
6546 {
6547 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
6548 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6549 }
6550
6551 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6552 return VINF_EM_HALT;
6553}
6554
6555
6556/**
6557 * Implements 'MONITOR'.
6558 */
6559IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
6560{
6561 /*
6562 * Permission checks.
6563 */
6564 if (pVCpu->iem.s.uCpl != 0)
6565 {
6566 Log2(("monitor: CPL != 0\n"));
6567 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
6568 }
6569 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6570 {
6571 Log2(("monitor: Not in CPUID\n"));
6572 return iemRaiseUndefinedOpcode(pVCpu);
6573 }
6574
6575 /*
6576 * Gather the operands and validate them.
6577 */
6578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6579 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6580 uint32_t uEcx = pCtx->ecx;
6581 uint32_t uEdx = pCtx->edx;
6582/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
6583 * \#GP first. */
6584 if (uEcx != 0)
6585 {
6586 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
6587 return iemRaiseGeneralProtectionFault0(pVCpu);
6588 }
6589
6590 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
6591 if (rcStrict != VINF_SUCCESS)
6592 return rcStrict;
6593
6594 RTGCPHYS GCPhysMem;
6595 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
6596 if (rcStrict != VINF_SUCCESS)
6597 return rcStrict;
6598
6599 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
6600 {
6601 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
6602 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6603 }
6604
6605 /*
6606 * Call EM to prepare the monitor/wait.
6607 */
6608 rcStrict = EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
6609 Assert(rcStrict == VINF_SUCCESS);
6610
6611 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6612 return rcStrict;
6613}
6614
6615
6616/**
6617 * Implements 'MWAIT'.
6618 */
6619IEM_CIMPL_DEF_0(iemCImpl_mwait)
6620{
6621 /*
6622 * Permission checks.
6623 */
6624 if (pVCpu->iem.s.uCpl != 0)
6625 {
6626 Log2(("mwait: CPL != 0\n"));
6627 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
6628 * EFLAGS.VM then.) */
6629 return iemRaiseUndefinedOpcode(pVCpu);
6630 }
6631 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6632 {
6633 Log2(("mwait: Not in CPUID\n"));
6634 return iemRaiseUndefinedOpcode(pVCpu);
6635 }
6636
6637 /*
6638 * Gather the operands and validate them.
6639 */
6640 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6641 uint32_t uEax = pCtx->eax;
6642 uint32_t uEcx = pCtx->ecx;
6643 if (uEcx != 0)
6644 {
6645 /* Only supported extension is break on IRQ when IF=0. */
6646 if (uEcx > 1)
6647 {
6648 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
6649 return iemRaiseGeneralProtectionFault0(pVCpu);
6650 }
6651 uint32_t fMWaitFeatures = 0;
6652 uint32_t uIgnore = 0;
6653 CPUMGetGuestCpuId(pVCpu, 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
6654 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6655 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6656 {
6657 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
6658 return iemRaiseGeneralProtectionFault0(pVCpu);
6659 }
6660 }
6661
6662 /*
6663 * Check SVM nested-guest mwait intercepts.
6664 */
6665 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
6666 && EMMonitorIsArmed(pVCpu))
6667 {
6668 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
6669 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6670 }
6671 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
6672 {
6673 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
6674 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6675 }
6676
6677 /*
6678 * Call EM to prepare the monitor/wait.
6679 */
6680 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
6681
6682 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6683 return rcStrict;
6684}
6685
6686
6687/**
6688 * Implements 'SWAPGS'.
6689 */
6690IEM_CIMPL_DEF_0(iemCImpl_swapgs)
6691{
6692 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
6693
6694 /*
6695 * Permission checks.
6696 */
6697 if (pVCpu->iem.s.uCpl != 0)
6698 {
6699 Log2(("swapgs: CPL != 0\n"));
6700 return iemRaiseUndefinedOpcode(pVCpu);
6701 }
6702
6703 /*
6704 * Do the job.
6705 */
6706 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6707 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
6708 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
6709 pCtx->gs.u64Base = uOtherGsBase;
6710
6711 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6712 return VINF_SUCCESS;
6713}
6714
6715
6716/**
6717 * Implements 'CPUID'.
6718 */
6719IEM_CIMPL_DEF_0(iemCImpl_cpuid)
6720{
6721 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6722
6723 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
6724 {
6725 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
6726 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6727 }
6728
6729 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
6730 pCtx->rax &= UINT32_C(0xffffffff);
6731 pCtx->rbx &= UINT32_C(0xffffffff);
6732 pCtx->rcx &= UINT32_C(0xffffffff);
6733 pCtx->rdx &= UINT32_C(0xffffffff);
6734
6735 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6736 return VINF_SUCCESS;
6737}
6738
6739
6740/**
6741 * Implements 'AAD'.
6742 *
6743 * @param bImm The immediate operand.
6744 */
6745IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
6746{
6747 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6748
6749 uint16_t const ax = pCtx->ax;
6750 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
6751 pCtx->ax = al;
6752 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6753 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6754 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6755
6756 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6757 return VINF_SUCCESS;
6758}
6759
6760
6761/**
6762 * Implements 'AAM'.
6763 *
6764 * @param bImm The immediate operand. Cannot be 0.
6765 */
6766IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
6767{
6768 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6769 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
6770
6771 uint16_t const ax = pCtx->ax;
6772 uint8_t const al = (uint8_t)ax % bImm;
6773 uint8_t const ah = (uint8_t)ax / bImm;
6774 pCtx->ax = (ah << 8) + al;
6775 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6776 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6777 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6778
6779 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6780 return VINF_SUCCESS;
6781}
6782
6783
6784/**
6785 * Implements 'DAA'.
6786 */
6787IEM_CIMPL_DEF_0(iemCImpl_daa)
6788{
6789 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6790
6791 uint8_t const al = pCtx->al;
6792 bool const fCarry = pCtx->eflags.Bits.u1CF;
6793
6794 if ( pCtx->eflags.Bits.u1AF
6795 || (al & 0xf) >= 10)
6796 {
6797 pCtx->al = al + 6;
6798 pCtx->eflags.Bits.u1AF = 1;
6799 }
6800 else
6801 pCtx->eflags.Bits.u1AF = 0;
6802
6803 if (al >= 0x9a || fCarry)
6804 {
6805 pCtx->al += 0x60;
6806 pCtx->eflags.Bits.u1CF = 1;
6807 }
6808 else
6809 pCtx->eflags.Bits.u1CF = 0;
6810
6811 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6812 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6813 return VINF_SUCCESS;
6814}
6815
6816
6817/**
6818 * Implements 'DAS'.
6819 */
6820IEM_CIMPL_DEF_0(iemCImpl_das)
6821{
6822 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6823
6824 uint8_t const uInputAL = pCtx->al;
6825 bool const fCarry = pCtx->eflags.Bits.u1CF;
6826
6827 if ( pCtx->eflags.Bits.u1AF
6828 || (uInputAL & 0xf) >= 10)
6829 {
6830 pCtx->eflags.Bits.u1AF = 1;
6831 if (uInputAL < 6)
6832 pCtx->eflags.Bits.u1CF = 1;
6833 pCtx->al = uInputAL - 6;
6834 }
6835 else
6836 {
6837 pCtx->eflags.Bits.u1AF = 0;
6838 pCtx->eflags.Bits.u1CF = 0;
6839 }
6840
6841 if (uInputAL >= 0x9a || fCarry)
6842 {
6843 pCtx->al -= 0x60;
6844 pCtx->eflags.Bits.u1CF = 1;
6845 }
6846
6847 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6848 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6849 return VINF_SUCCESS;
6850}
6851
6852
6853/**
6854 * Implements 'AAA'.
6855 */
6856IEM_CIMPL_DEF_0(iemCImpl_aaa)
6857{
6858 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6859
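    /* The undefined SF/ZF/PF/OF results differ by vendor: the AMD path does a real
       16-bit ADD of 0x106 so those flags come from that addition, while the Intel
       path derives them from the resulting AL value. */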
6860 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6861 {
6862 if ( pCtx->eflags.Bits.u1AF
6863 || (pCtx->ax & 0xf) >= 10)
6864 {
6865 iemAImpl_add_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6866 pCtx->eflags.Bits.u1AF = 1;
6867 pCtx->eflags.Bits.u1CF = 1;
6868#ifdef IEM_VERIFICATION_MODE_FULL
6869            pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6870#endif
6871 }
6872 else
6873 {
6874 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6875 pCtx->eflags.Bits.u1AF = 0;
6876 pCtx->eflags.Bits.u1CF = 0;
6877 }
6878 pCtx->ax &= UINT16_C(0xff0f);
6879 }
6880 else
6881 {
6882 if ( pCtx->eflags.Bits.u1AF
6883 || (pCtx->ax & 0xf) >= 10)
6884 {
6885 pCtx->ax += UINT16_C(0x106);
6886 pCtx->eflags.Bits.u1AF = 1;
6887 pCtx->eflags.Bits.u1CF = 1;
6888 }
6889 else
6890 {
6891 pCtx->eflags.Bits.u1AF = 0;
6892 pCtx->eflags.Bits.u1CF = 0;
6893 }
6894 pCtx->ax &= UINT16_C(0xff0f);
6895 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6896 }
6897
6898 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6899 return VINF_SUCCESS;
6900}
6901
6902
6903/**
6904 * Implements 'AAS'.
6905 */
6906IEM_CIMPL_DEF_0(iemCImpl_aas)
6907{
6908 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6909
6910 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6911 {
6912 if ( pCtx->eflags.Bits.u1AF
6913 || (pCtx->ax & 0xf) >= 10)
6914 {
6915 iemAImpl_sub_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6916 pCtx->eflags.Bits.u1AF = 1;
6917 pCtx->eflags.Bits.u1CF = 1;
6918#ifdef IEM_VERIFICATION_MODE_FULL
6919            pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6920#endif
6921 }
6922 else
6923 {
6924 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6925 pCtx->eflags.Bits.u1AF = 0;
6926 pCtx->eflags.Bits.u1CF = 0;
6927 }
6928 pCtx->ax &= UINT16_C(0xff0f);
6929 }
6930 else
6931 {
6932 if ( pCtx->eflags.Bits.u1AF
6933 || (pCtx->ax & 0xf) >= 10)
6934 {
6935 pCtx->ax -= UINT16_C(0x106);
6936 pCtx->eflags.Bits.u1AF = 1;
6937 pCtx->eflags.Bits.u1CF = 1;
6938 }
6939 else
6940 {
6941 pCtx->eflags.Bits.u1AF = 0;
6942 pCtx->eflags.Bits.u1CF = 0;
6943 }
6944 pCtx->ax &= UINT16_C(0xff0f);
6945 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6946 }
6947
6948 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6949 return VINF_SUCCESS;
6950}
6951
6952
6953/**
6954 * Implements the 16-bit version of 'BOUND'.
6955 *
6956 * @note We have separate 16-bit and 32-bit variants of this function due to
 6957 * the decoder using unsigned parameters, whereas we want signed ones to
6958 * do the job. This is significant for a recompiler.
6959 */
6960IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
6961{
6962 /*
6963 * Check if the index is inside the bounds, otherwise raise #BR.
6964 */
6965 if ( idxArray >= idxLowerBound
6966 && idxArray <= idxUpperBound)
6967 {
6968 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6969 return VINF_SUCCESS;
6970 }
6971
6972 return iemRaiseBoundRangeExceeded(pVCpu);
6973}
6974
6975
6976/**
6977 * Implements the 32-bit version of 'BOUND'.
6978 */
6979IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
6980{
6981 /*
6982 * Check if the index is inside the bounds, otherwise raise #BR.
6983 */
6984 if ( idxArray >= idxLowerBound
6985 && idxArray <= idxUpperBound)
6986 {
6987 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6988 return VINF_SUCCESS;
6989 }
6990
6991 return iemRaiseBoundRangeExceeded(pVCpu);
6992}
6993
6994
6995
6996/*
6997 * Instantiate the various string operation combinations.
6998 */
6999#define OP_SIZE 8
7000#define ADDR_SIZE 16
7001#include "IEMAllCImplStrInstr.cpp.h"
7002#define OP_SIZE 8
7003#define ADDR_SIZE 32
7004#include "IEMAllCImplStrInstr.cpp.h"
7005#define OP_SIZE 8
7006#define ADDR_SIZE 64
7007#include "IEMAllCImplStrInstr.cpp.h"
7008
7009#define OP_SIZE 16
7010#define ADDR_SIZE 16
7011#include "IEMAllCImplStrInstr.cpp.h"
7012#define OP_SIZE 16
7013#define ADDR_SIZE 32
7014#include "IEMAllCImplStrInstr.cpp.h"
7015#define OP_SIZE 16
7016#define ADDR_SIZE 64
7017#include "IEMAllCImplStrInstr.cpp.h"
7018
7019#define OP_SIZE 32
7020#define ADDR_SIZE 16
7021#include "IEMAllCImplStrInstr.cpp.h"
7022#define OP_SIZE 32
7023#define ADDR_SIZE 32
7024#include "IEMAllCImplStrInstr.cpp.h"
7025#define OP_SIZE 32
7026#define ADDR_SIZE 64
7027#include "IEMAllCImplStrInstr.cpp.h"
7028
7029#define OP_SIZE 64
7030#define ADDR_SIZE 32
7031#include "IEMAllCImplStrInstr.cpp.h"
7032#define OP_SIZE 64
7033#define ADDR_SIZE 64
7034#include "IEMAllCImplStrInstr.cpp.h"
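/* Note: there is no OP_SIZE 64 / ADDR_SIZE 16 combination; 64-bit operand size
   implies 64-bit mode, where the address-size prefix selects 32-bit addressing. */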
7035
7036
7037/**
7038 * Implements 'XGETBV'.
7039 */
7040IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
7041{
7042 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7043 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7044 {
7045 uint32_t uEcx = pCtx->ecx;
7046 switch (uEcx)
7047 {
7048 case 0:
7049 break;
7050
7051 case 1: /** @todo Implement XCR1 support. */
7052 default:
7053 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
7054 return iemRaiseGeneralProtectionFault0(pVCpu);
7055
7056 }
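        /* The selected 64-bit XCR is returned in EDX:EAX; assigning the 32-bit
           halves below also zeroes the upper halves of RAX/RDX, as on real CPUs. */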
7057 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
7058 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
7059
7060 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7061 return VINF_SUCCESS;
7062 }
7063 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
7064 return iemRaiseUndefinedOpcode(pVCpu);
7065}
7066
7067
7068/**
7069 * Implements 'XSETBV'.
7070 */
7071IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
7072{
7073 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7074 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7075 {
7076 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
7077 {
7078 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
7079 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7080 }
7081
7082 if (pVCpu->iem.s.uCpl == 0)
7083 {
7084 uint32_t uEcx = pCtx->ecx;
7085 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
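            /* Architecturally XCR0 must keep the X87 bit set, cannot have YMM
               without SSE, and cannot have unsupported bits set; CPUMSetGuestXcr0
               is expected to reject such values with VERR_CPUM_RAISE_GP_0. */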
7086 switch (uEcx)
7087 {
7088 case 0:
7089 {
7090 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
7091 if (rc == VINF_SUCCESS)
7092 break;
7093 Assert(rc == VERR_CPUM_RAISE_GP_0);
7094 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7095 return iemRaiseGeneralProtectionFault0(pVCpu);
7096 }
7097
7098 case 1: /** @todo Implement XCR1 support. */
7099 default:
7100 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7101 return iemRaiseGeneralProtectionFault0(pVCpu);
7102
7103 }
7104
7105 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7106 return VINF_SUCCESS;
7107 }
7108
7109 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
7110 return iemRaiseGeneralProtectionFault0(pVCpu);
7111 }
7112 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
7113 return iemRaiseUndefinedOpcode(pVCpu);
7114}
7115
7116#ifdef IN_RING3
7117
7118/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
7119struct IEMCIMPLCX16ARGS
7120{
7121 PRTUINT128U pu128Dst;
7122 PRTUINT128U pu128RaxRdx;
7123 PRTUINT128U pu128RbxRcx;
7124 uint32_t *pEFlags;
7125# ifdef VBOX_STRICT
7126 uint32_t cCalls;
7127# endif
7128};
7129
7130/**
7131 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
7132 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
7133 */
7134static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPU pVCpu, void *pvUser)
7135{
7136 RT_NOREF(pVM, pVCpu);
7137 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
7138# ifdef VBOX_STRICT
7139 Assert(pArgs->cCalls == 0);
7140 pArgs->cCalls++;
7141# endif
7142
7143 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
7144 return VINF_SUCCESS;
7145}
7146
7147#endif /* IN_RING3 */
7148
7149/**
7150 * Implements 'CMPXCHG16B' fallback using rendezvous.
7151 */
7152IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
7153 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
7154{
7155#ifdef IN_RING3
7156 struct IEMCIMPLCX16ARGS Args;
7157 Args.pu128Dst = pu128Dst;
7158 Args.pu128RaxRdx = pu128RaxRdx;
7159 Args.pu128RbxRcx = pu128RbxRcx;
7160 Args.pEFlags = pEFlags;
7161# ifdef VBOX_STRICT
7162 Args.cCalls = 0;
7163# endif
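    /* The once-type rendezvous presumably gathers all EMTs and runs the callback on
       just one of them, so the non-atomic 16-byte fallback compare/exchange cannot
       race with the other virtual CPUs. */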
7164 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
7165 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
7166 Assert(Args.cCalls == 1);
7167 if (rcStrict == VINF_SUCCESS)
7168 {
7169 /* Duplicated tail code. */
7170 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
7171 if (rcStrict == VINF_SUCCESS)
7172 {
7173 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7174 pCtx->eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
7175 if (!(*pEFlags & X86_EFL_ZF))
7176 {
7177 pCtx->rax = pu128RaxRdx->s.Lo;
7178 pCtx->rdx = pu128RaxRdx->s.Hi;
7179 }
7180 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7181 }
7182 }
7183 return rcStrict;
7184#else
7185 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
7186 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
7187#endif
7188}
7189
7190
7191/**
7192 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
7193 *
 7194 * This is implemented in C because it triggers load-like behaviour without
7195 * actually reading anything. Since that's not so common, it's implemented
7196 * here.
7197 *
7198 * @param iEffSeg The effective segment.
7199 * @param GCPtrEff The address of the image.
7200 */
7201IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7202{
7203 /*
7204 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
7205 */
7206 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
7207 if (rcStrict == VINF_SUCCESS)
7208 {
7209 RTGCPHYS GCPhysMem;
7210 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7211 if (rcStrict == VINF_SUCCESS)
7212 {
7213 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7214 return VINF_SUCCESS;
7215 }
7216 }
7217
7218 return rcStrict;
7219}
7220
7221
7222/**
7223 * Implements 'FINIT' and 'FNINIT'.
7224 *
 7225 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
7226 * not.
7227 */
7228IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
7229{
7230 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7231
7232 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
7233 return iemRaiseDeviceNotAvailable(pVCpu);
7234
7235 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
7236 if (fCheckXcpts && TODO )
7237 return iemRaiseMathFault(pVCpu);
7238 */
7239
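    /* Architected FINIT/FNINIT state: FCW=0x037f (all exceptions masked, 64-bit
       precision, round to nearest), FSW=0 (TOP=0), all registers tagged empty, and
       the instruction/data pointers and opcode cleared. */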
7240 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
7241 pXState->x87.FCW = 0x37f;
7242 pXState->x87.FSW = 0;
7243 pXState->x87.FTW = 0x00; /* 0 - empty. */
7244 pXState->x87.FPUDP = 0;
7245 pXState->x87.DS = 0; //??
7246 pXState->x87.Rsrvd2= 0;
7247 pXState->x87.FPUIP = 0;
7248 pXState->x87.CS = 0; //??
7249 pXState->x87.Rsrvd1= 0;
7250 pXState->x87.FOP = 0;
7251
7252 iemHlpUsedFpu(pVCpu);
7253 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7254 return VINF_SUCCESS;
7255}
7256
7257
7258/**
7259 * Implements 'FXSAVE'.
7260 *
7261 * @param iEffSeg The effective segment.
7262 * @param GCPtrEff The address of the image.
7263 * @param enmEffOpSize The operand size (only REX.W really matters).
7264 */
7265IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7266{
7267 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7268
7269 /*
7270 * Raise exceptions.
7271 */
7272 if (pCtx->cr0 & X86_CR0_EM)
7273 return iemRaiseUndefinedOpcode(pVCpu);
7274 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7275 return iemRaiseDeviceNotAvailable(pVCpu);
7276 if (GCPtrEff & 15)
7277 {
 7278 /** @todo CPU/VM detection possible! \#AC might not be signalled for
 7279 * all/any misalignment sizes; Intel says it's an implementation detail. */
7280 if ( (pCtx->cr0 & X86_CR0_AM)
7281 && pCtx->eflags.Bits.u1AC
7282 && pVCpu->iem.s.uCpl == 3)
7283 return iemRaiseAlignmentCheckException(pVCpu);
7284 return iemRaiseGeneralProtectionFault0(pVCpu);
7285 }
7286
7287 /*
7288 * Access the memory.
7289 */
7290 void *pvMem512;
7291 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7292 if (rcStrict != VINF_SUCCESS)
7293 return rcStrict;
7294 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7295 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7296
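    /* Layout of the 512-byte FXSAVE image: a 32-byte header (FCW, FSW, abridged
       FTW, FOP, FPU IP/CS, FPU DP/DS, MXCSR, MXCSR_MASK), eight 16-byte ST/MM
       slots, sixteen 16-byte XMM slots, and reserved/available space at the end. */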
7297 /*
7298 * Store the registers.
7299 */
 7300 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
7301 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
7302
7303 /* common for all formats */
7304 pDst->FCW = pSrc->FCW;
7305 pDst->FSW = pSrc->FSW;
7306 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7307 pDst->FOP = pSrc->FOP;
7308 pDst->MXCSR = pSrc->MXCSR;
7309 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7310 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7311 {
7312 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7313 * them for now... */
7314 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7315 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7316 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7317 pDst->aRegs[i].au32[3] = 0;
7318 }
7319
7320 /* FPU IP, CS, DP and DS. */
7321 pDst->FPUIP = pSrc->FPUIP;
7322 pDst->CS = pSrc->CS;
7323 pDst->FPUDP = pSrc->FPUDP;
7324 pDst->DS = pSrc->DS;
7325 if (enmEffOpSize == IEMMODE_64BIT)
7326 {
7327 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7328 pDst->Rsrvd1 = pSrc->Rsrvd1;
7329 pDst->Rsrvd2 = pSrc->Rsrvd2;
7330 pDst->au32RsrvdForSoftware[0] = 0;
7331 }
7332 else
7333 {
7334 pDst->Rsrvd1 = 0;
7335 pDst->Rsrvd2 = 0;
7336 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7337 }
7338
7339 /* XMM registers. */
7340 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7341 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7342 || pVCpu->iem.s.uCpl != 0)
7343 {
7344 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7345 for (uint32_t i = 0; i < cXmmRegs; i++)
7346 pDst->aXMM[i] = pSrc->aXMM[i];
7347 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7348 * right? */
7349 }
7350
7351 /*
7352 * Commit the memory.
7353 */
7354 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7355 if (rcStrict != VINF_SUCCESS)
7356 return rcStrict;
7357
7358 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7359 return VINF_SUCCESS;
7360}
7361
7362
7363/**
7364 * Implements 'FXRSTOR'.
7365 *
7366 * @param GCPtrEff The address of the image.
7367 * @param enmEffOpSize The operand size (only REX.W really matters).
7368 */
7369IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7370{
7371 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7372
7373 /*
7374 * Raise exceptions.
7375 */
7376 if (pCtx->cr0 & X86_CR0_EM)
7377 return iemRaiseUndefinedOpcode(pVCpu);
7378 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7379 return iemRaiseDeviceNotAvailable(pVCpu);
7380 if (GCPtrEff & 15)
7381 {
 7382 /** @todo CPU/VM detection possible! \#AC might not be signalled for
 7383 * all/any misalignment sizes; Intel says it's an implementation detail. */
7384 if ( (pCtx->cr0 & X86_CR0_AM)
7385 && pCtx->eflags.Bits.u1AC
7386 && pVCpu->iem.s.uCpl == 3)
7387 return iemRaiseAlignmentCheckException(pVCpu);
7388 return iemRaiseGeneralProtectionFault0(pVCpu);
7389 }
7390
7391 /*
7392 * Access the memory.
7393 */
7394 void *pvMem512;
7395 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7396 if (rcStrict != VINF_SUCCESS)
7397 return rcStrict;
7398 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7399 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7400
7401 /*
7402 * Check the state for stuff which will #GP(0).
7403 */
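    /* Architecturally, loading any MXCSR bit that is not set in MXCSR_MASK is
       reserved and must raise #GP(0). */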
7404 uint32_t const fMXCSR = pSrc->MXCSR;
7405 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7406 if (fMXCSR & ~fMXCSR_MASK)
7407 {
7408 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
7409 return iemRaiseGeneralProtectionFault0(pVCpu);
7410 }
7411
7412 /*
7413 * Load the registers.
7414 */
 7415 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
7416 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
7417
7418 /* common for all formats */
7419 pDst->FCW = pSrc->FCW;
7420 pDst->FSW = pSrc->FSW;
7421 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7422 pDst->FOP = pSrc->FOP;
7423 pDst->MXCSR = fMXCSR;
7424 /* (MXCSR_MASK is read-only) */
7425 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7426 {
7427 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7428 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7429 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7430 pDst->aRegs[i].au32[3] = 0;
7431 }
7432
7433 /* FPU IP, CS, DP and DS. */
7434 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7435 {
7436 pDst->FPUIP = pSrc->FPUIP;
7437 pDst->CS = pSrc->CS;
7438 pDst->Rsrvd1 = pSrc->Rsrvd1;
7439 pDst->FPUDP = pSrc->FPUDP;
7440 pDst->DS = pSrc->DS;
7441 pDst->Rsrvd2 = pSrc->Rsrvd2;
7442 }
7443 else
7444 {
7445 pDst->FPUIP = pSrc->FPUIP;
7446 pDst->CS = pSrc->CS;
7447 pDst->Rsrvd1 = 0;
7448 pDst->FPUDP = pSrc->FPUDP;
7449 pDst->DS = pSrc->DS;
7450 pDst->Rsrvd2 = 0;
7451 }
7452
7453 /* XMM registers. */
7454 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7455 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7456 || pVCpu->iem.s.uCpl != 0)
7457 {
7458 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7459 for (uint32_t i = 0; i < cXmmRegs; i++)
7460 pDst->aXMM[i] = pSrc->aXMM[i];
7461 }
7462
7463 /*
7464 * Commit the memory.
7465 */
7466 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7467 if (rcStrict != VINF_SUCCESS)
7468 return rcStrict;
7469
7470 iemHlpUsedFpu(pVCpu);
7471 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7472 return VINF_SUCCESS;
7473}
7474
7475
7476/**
7477 * Implements 'XSAVE'.
7478 *
7479 * @param iEffSeg The effective segment.
7480 * @param GCPtrEff The address of the image.
7481 * @param enmEffOpSize The operand size (only REX.W really matters).
7482 */
7483IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7484{
7485 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7486
7487 /*
7488 * Raise exceptions.
7489 */
7490 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7491 return iemRaiseUndefinedOpcode(pVCpu);
7492 if (pCtx->cr0 & X86_CR0_TS)
7493 return iemRaiseDeviceNotAvailable(pVCpu);
7494 if (GCPtrEff & 63)
7495 {
 7496 /** @todo CPU/VM detection possible! \#AC might not be signalled for
 7497 * all/any misalignment sizes; Intel says it's an implementation detail. */
7498 if ( (pCtx->cr0 & X86_CR0_AM)
7499 && pCtx->eflags.Bits.u1AC
7500 && pVCpu->iem.s.uCpl == 3)
7501 return iemRaiseAlignmentCheckException(pVCpu);
7502 return iemRaiseGeneralProtectionFault0(pVCpu);
7503 }
7504
7505 /*
7506 * Calc the requested mask
7507 */
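    /* The requested-feature bitmap (RFBM) is the instruction mask in EDX:EAX ANDed
       with XCR0; only components present in both are considered below. */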
7508 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7509 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7510 uint64_t const fXInUse = pCtx->aXcr[0];
7511
7512/** @todo figure out the exact protocol for the memory access. Currently we
7513 * just need this crap to work halfways to make it possible to test
7514 * AVX instructions. */
7515/** @todo figure out the XINUSE and XMODIFIED */
7516
7517 /*
7518 * Access the x87 memory state.
7519 */
7520 /* The x87+SSE state. */
7521 void *pvMem512;
7522 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7523 if (rcStrict != VINF_SUCCESS)
7524 return rcStrict;
7525 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7526 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7527
7528 /* The header. */
7529 PX86XSAVEHDR pHdr;
 7530 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
7531 if (rcStrict != VINF_SUCCESS)
7532 return rcStrict;
7533
7534 /*
7535 * Store the X87 state.
7536 */
7537 if (fReqComponents & XSAVE_C_X87)
7538 {
7539 /* common for all formats */
7540 pDst->FCW = pSrc->FCW;
7541 pDst->FSW = pSrc->FSW;
7542 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7543 pDst->FOP = pSrc->FOP;
7544 pDst->FPUIP = pSrc->FPUIP;
7545 pDst->CS = pSrc->CS;
7546 pDst->FPUDP = pSrc->FPUDP;
7547 pDst->DS = pSrc->DS;
7548 if (enmEffOpSize == IEMMODE_64BIT)
7549 {
7550 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7551 pDst->Rsrvd1 = pSrc->Rsrvd1;
7552 pDst->Rsrvd2 = pSrc->Rsrvd2;
7553 pDst->au32RsrvdForSoftware[0] = 0;
7554 }
7555 else
7556 {
7557 pDst->Rsrvd1 = 0;
7558 pDst->Rsrvd2 = 0;
7559 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7560 }
7561 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7562 {
7563 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7564 * them for now... */
7565 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7566 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7567 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7568 pDst->aRegs[i].au32[3] = 0;
7569 }
7570
7571 }
7572
7573 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7574 {
7575 pDst->MXCSR = pSrc->MXCSR;
7576 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7577 }
7578
7579 if (fReqComponents & XSAVE_C_SSE)
7580 {
7581 /* XMM registers. */
7582 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7583 for (uint32_t i = 0; i < cXmmRegs; i++)
7584 pDst->aXMM[i] = pSrc->aXMM[i];
7585 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7586 * right? */
7587 }
7588
7589 /* Commit the x87 state bits. (probably wrong) */
7590 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7591 if (rcStrict != VINF_SUCCESS)
7592 return rcStrict;
7593
7594 /*
7595 * Store AVX state.
7596 */
7597 if (fReqComponents & XSAVE_C_YMM)
7598 {
7599 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7600 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7601 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
7602 PX86XSAVEYMMHI pCompDst;
7603 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
7604 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7605 if (rcStrict != VINF_SUCCESS)
7606 return rcStrict;
7607
7608 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7609 for (uint32_t i = 0; i < cXmmRegs; i++)
7610 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
7611
7612 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7613 if (rcStrict != VINF_SUCCESS)
7614 return rcStrict;
7615 }
7616
7617 /*
7618 * Update the header.
7619 */
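    /* XSTATE_BV semantics: for each requested component the bit is set if the
       component is in use and cleared if it is in its initial configuration;
       bits of components not in RFBM are left untouched. XINUSE is approximated
       with XCR0 here (see the todo above). */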
7620 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
7621 | (fReqComponents & fXInUse);
7622
7623 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
7624 if (rcStrict != VINF_SUCCESS)
7625 return rcStrict;
7626
7627 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7628 return VINF_SUCCESS;
7629}
7630
7631
7632/**
7633 * Implements 'XRSTOR'.
7634 *
7635 * @param iEffSeg The effective segment.
7636 * @param GCPtrEff The address of the image.
7637 * @param enmEffOpSize The operand size (only REX.W really matters).
7638 */
7639IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7640{
7641 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7642
7643 /*
7644 * Raise exceptions.
7645 */
7646 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7647 return iemRaiseUndefinedOpcode(pVCpu);
7648 if (pCtx->cr0 & X86_CR0_TS)
7649 return iemRaiseDeviceNotAvailable(pVCpu);
7650 if (GCPtrEff & 63)
7651 {
 7652 /** @todo CPU/VM detection possible! \#AC might not be signalled for
 7653 * all/any misalignment sizes; Intel says it's an implementation detail. */
7654 if ( (pCtx->cr0 & X86_CR0_AM)
7655 && pCtx->eflags.Bits.u1AC
7656 && pVCpu->iem.s.uCpl == 3)
7657 return iemRaiseAlignmentCheckException(pVCpu);
7658 return iemRaiseGeneralProtectionFault0(pVCpu);
7659 }
7660
7661/** @todo figure out the exact protocol for the memory access. Currently we
7662 * just need this crap to work halfways to make it possible to test
7663 * AVX instructions. */
7664/** @todo figure out the XINUSE and XMODIFIED */
7665
7666 /*
7667 * Access the x87 memory state.
7668 */
7669 /* The x87+SSE state. */
7670 void *pvMem512;
7671 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7672 if (rcStrict != VINF_SUCCESS)
7673 return rcStrict;
7674 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7675 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7676
7677 /*
7678 * Calc the requested mask
7679 */
7680 PX86XSAVEHDR pHdrDst = &pCtx->CTX_SUFF(pXState)->Hdr;
7681 PCX86XSAVEHDR pHdrSrc;
 7682 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
7683 if (rcStrict != VINF_SUCCESS)
7684 return rcStrict;
7685
7686 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7687 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7688 //uint64_t const fXInUse = pCtx->aXcr[0];
7689 uint64_t const fRstorMask = pHdrSrc->bmXState;
7690 uint64_t const fCompMask = pHdrSrc->bmXComp;
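    /* XSTATE_BV: a set bit means the component is loaded from the image, a clear
       bit means it is put back into its initial configuration. XCOMP_BV bit 63
       selects the compacted format, which is not handled here; XSAVE_C_X is
       presumably that format bit (hence the assertion below). */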
7691
7692 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7693
7694 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7695
7696 /* We won't need this any longer. */
7697 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
7698 if (rcStrict != VINF_SUCCESS)
7699 return rcStrict;
7700
7701 /*
 7702 * Load the X87 state.
7703 */
7704 if (fReqComponents & XSAVE_C_X87)
7705 {
7706 if (fRstorMask & XSAVE_C_X87)
7707 {
7708 pDst->FCW = pSrc->FCW;
7709 pDst->FSW = pSrc->FSW;
7710 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7711 pDst->FOP = pSrc->FOP;
7712 pDst->FPUIP = pSrc->FPUIP;
7713 pDst->CS = pSrc->CS;
7714 pDst->FPUDP = pSrc->FPUDP;
7715 pDst->DS = pSrc->DS;
7716 if (enmEffOpSize == IEMMODE_64BIT)
7717 {
7718 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7719 pDst->Rsrvd1 = pSrc->Rsrvd1;
7720 pDst->Rsrvd2 = pSrc->Rsrvd2;
7721 }
7722 else
7723 {
7724 pDst->Rsrvd1 = 0;
7725 pDst->Rsrvd2 = 0;
7726 }
7727 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7728 {
7729 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7730 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7731 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7732 pDst->aRegs[i].au32[3] = 0;
7733 }
7734 }
7735 else
7736 {
7737 pDst->FCW = 0x37f;
7738 pDst->FSW = 0;
7739 pDst->FTW = 0x00; /* 0 - empty. */
7740 pDst->FPUDP = 0;
7741 pDst->DS = 0; //??
7742 pDst->Rsrvd2= 0;
7743 pDst->FPUIP = 0;
7744 pDst->CS = 0; //??
7745 pDst->Rsrvd1= 0;
7746 pDst->FOP = 0;
7747 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7748 {
7749 pDst->aRegs[i].au32[0] = 0;
7750 pDst->aRegs[i].au32[1] = 0;
7751 pDst->aRegs[i].au32[2] = 0;
7752 pDst->aRegs[i].au32[3] = 0;
7753 }
7754 }
7755 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
7756 }
7757
7758 /* MXCSR */
7759 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7760 {
7761 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
7762 pDst->MXCSR = pSrc->MXCSR;
7763 else
7764 pDst->MXCSR = 0x1f80;
7765 }
7766
7767 /* XMM registers. */
7768 if (fReqComponents & XSAVE_C_SSE)
7769 {
7770 if (fRstorMask & XSAVE_C_SSE)
7771 {
7772 for (uint32_t i = 0; i < cXmmRegs; i++)
7773 pDst->aXMM[i] = pSrc->aXMM[i];
7774 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7775 * right? */
7776 }
7777 else
7778 {
7779 for (uint32_t i = 0; i < cXmmRegs; i++)
7780 {
7781 pDst->aXMM[i].au64[0] = 0;
7782 pDst->aXMM[i].au64[1] = 0;
7783 }
7784 }
7785 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
7786 }
7787
 7788 /* Unmap the x87 state bits (so we don't run out of mappings). */
7789 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7790 if (rcStrict != VINF_SUCCESS)
7791 return rcStrict;
7792
7793 /*
7794 * Restore AVX state.
7795 */
7796 if (fReqComponents & XSAVE_C_YMM)
7797 {
7798 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7799 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
7800
7801 if (fRstorMask & XSAVE_C_YMM)
7802 {
7803 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7804 PCX86XSAVEYMMHI pCompSrc;
7805 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
7806 iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
7807 if (rcStrict != VINF_SUCCESS)
7808 return rcStrict;
7809
7810 for (uint32_t i = 0; i < cXmmRegs; i++)
7811 {
7812 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
7813 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
7814 }
7815
7816 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
7817 if (rcStrict != VINF_SUCCESS)
7818 return rcStrict;
7819 }
7820 else
7821 {
7822 for (uint32_t i = 0; i < cXmmRegs; i++)
7823 {
7824 pCompDst->aYmmHi[i].au64[0] = 0;
7825 pCompDst->aYmmHi[i].au64[1] = 0;
7826 }
7827 }
7828 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
7829 }
7830
7831 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7832 return VINF_SUCCESS;
7833}
7834
7835
7836
7837
7838/**
7839 * Implements 'STMXCSR'.
7840 *
7841 * @param GCPtrEff The address of the image.
7842 */
7843IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7844{
7845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7846
7847 /*
7848 * Raise exceptions.
7849 */
7850 if ( !(pCtx->cr0 & X86_CR0_EM)
7851 && (pCtx->cr4 & X86_CR4_OSFXSR))
7852 {
7853 if (!(pCtx->cr0 & X86_CR0_TS))
7854 {
7855 /*
7856 * Do the job.
7857 */
7858 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7859 if (rcStrict == VINF_SUCCESS)
7860 {
7861 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7862 return VINF_SUCCESS;
7863 }
7864 return rcStrict;
7865 }
7866 return iemRaiseDeviceNotAvailable(pVCpu);
7867 }
7868 return iemRaiseUndefinedOpcode(pVCpu);
7869}
7870
7871
7872/**
7873 * Implements 'VSTMXCSR'.
7874 *
7875 * @param GCPtrEff The address of the image.
7876 */
7877IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7878{
7879 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7880
7881 /*
7882 * Raise exceptions.
7883 */
7884 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
7885 ? (pCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
7886 : !(pCtx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
7887 && (pCtx->cr4 & X86_CR4_OSXSAVE))
7888 {
7889 if (!(pCtx->cr0 & X86_CR0_TS))
7890 {
7891 /*
7892 * Do the job.
7893 */
7894 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7895 if (rcStrict == VINF_SUCCESS)
7896 {
7897 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7898 return VINF_SUCCESS;
7899 }
7900 return rcStrict;
7901 }
7902 return iemRaiseDeviceNotAvailable(pVCpu);
7903 }
7904 return iemRaiseUndefinedOpcode(pVCpu);
7905}
7906
7907
7908/**
7909 * Implements 'LDMXCSR'.
7910 *
7911 * @param GCPtrEff The address of the image.
7912 */
7913IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7914{
7915 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7916
7917 /*
7918 * Raise exceptions.
7919 */
7920 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
7921 * happen after or before \#UD and \#EM? */
7922 if ( !(pCtx->cr0 & X86_CR0_EM)
7923 && (pCtx->cr4 & X86_CR4_OSFXSR))
7924 {
7925 if (!(pCtx->cr0 & X86_CR0_TS))
7926 {
7927 /*
7928 * Do the job.
7929 */
7930 uint32_t fNewMxCsr;
7931 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
7932 if (rcStrict == VINF_SUCCESS)
7933 {
7934 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7935 if (!(fNewMxCsr & ~fMxCsrMask))
7936 {
7937 pCtx->CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr;
7938 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7939 return VINF_SUCCESS;
7940 }
 7941 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
7942 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
7943 return iemRaiseGeneralProtectionFault0(pVCpu);
7944 }
7945 return rcStrict;
7946 }
7947 return iemRaiseDeviceNotAvailable(pVCpu);
7948 }
7949 return iemRaiseUndefinedOpcode(pVCpu);
7950}
7951
7952
7953/**
 7954 * Common routine for fnstenv and fnsave.
7955 *
7956 * @param uPtr Where to store the state.
7957 * @param pCtx The CPU context.
7958 */
7959static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
7960{
7961 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
7962 if (enmEffOpSize == IEMMODE_16BIT)
7963 {
7964 uPtr.pu16[0] = pSrcX87->FCW;
7965 uPtr.pu16[1] = pSrcX87->FSW;
7966 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
7967 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7968 {
7969 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
7970 * protected mode or long mode and we save it in real mode? And vice
7971 * versa? And with 32-bit operand size? I think CPU is storing the
7972 * effective address ((CS << 4) + IP) in the offset register and not
7973 * doing any address calculations here. */
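        /* Real/V86-mode 16-bit environment layout, word by word: FCW, FSW, FTW,
           FPU IP[15:0], (FPU IP[19:16] << 12) | FOP[10:0], FPU DP[15:0],
           FPU DP[19:16] << 12. */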
7974 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
7975 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
7976 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
7977 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
7978 }
7979 else
7980 {
7981 uPtr.pu16[3] = pSrcX87->FPUIP;
7982 uPtr.pu16[4] = pSrcX87->CS;
7983 uPtr.pu16[5] = pSrcX87->FPUDP;
7984 uPtr.pu16[6] = pSrcX87->DS;
7985 }
7986 }
7987 else
7988 {
7989 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
7990 uPtr.pu16[0*2] = pSrcX87->FCW;
7991 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
7992 uPtr.pu16[1*2] = pSrcX87->FSW;
7993 uPtr.pu16[1*2+1] = 0xffff;
7994 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
7995 uPtr.pu16[2*2+1] = 0xffff;
7996 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7997 {
7998 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
7999 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
8000 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
8001 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
8002 }
8003 else
8004 {
8005 uPtr.pu32[3] = pSrcX87->FPUIP;
8006 uPtr.pu16[4*2] = pSrcX87->CS;
8007 uPtr.pu16[4*2+1] = pSrcX87->FOP;
8008 uPtr.pu32[5] = pSrcX87->FPUDP;
8009 uPtr.pu16[6*2] = pSrcX87->DS;
8010 uPtr.pu16[6*2+1] = 0xffff;
8011 }
8012 }
8013}
8014
8015
8016/**
 8017 * Common routine for fldenv and frstor.
 8018 *
 8019 * @param uPtr Where to load the state from.
8020 * @param pCtx The CPU context.
8021 */
8022static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
8023{
8024 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
8025 if (enmEffOpSize == IEMMODE_16BIT)
8026 {
8027 pDstX87->FCW = uPtr.pu16[0];
8028 pDstX87->FSW = uPtr.pu16[1];
8029 pDstX87->FTW = uPtr.pu16[2];
8030 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8031 {
8032 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
8033 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
8034 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
8035 pDstX87->CS = 0;
8036 pDstX87->Rsrvd1= 0;
8037 pDstX87->DS = 0;
8038 pDstX87->Rsrvd2= 0;
8039 }
8040 else
8041 {
8042 pDstX87->FPUIP = uPtr.pu16[3];
8043 pDstX87->CS = uPtr.pu16[4];
8044 pDstX87->Rsrvd1= 0;
8045 pDstX87->FPUDP = uPtr.pu16[5];
8046 pDstX87->DS = uPtr.pu16[6];
8047 pDstX87->Rsrvd2= 0;
8048 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
8049 }
8050 }
8051 else
8052 {
8053 pDstX87->FCW = uPtr.pu16[0*2];
8054 pDstX87->FSW = uPtr.pu16[1*2];
8055 pDstX87->FTW = uPtr.pu16[2*2];
8056 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8057 {
8058 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
8059 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
8060 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
8061 pDstX87->CS = 0;
8062 pDstX87->Rsrvd1= 0;
8063 pDstX87->DS = 0;
8064 pDstX87->Rsrvd2= 0;
8065 }
8066 else
8067 {
8068 pDstX87->FPUIP = uPtr.pu32[3];
8069 pDstX87->CS = uPtr.pu16[4*2];
8070 pDstX87->Rsrvd1= 0;
8071 pDstX87->FOP = uPtr.pu16[4*2+1];
8072 pDstX87->FPUDP = uPtr.pu32[5];
8073 pDstX87->DS = uPtr.pu16[6*2];
8074 pDstX87->Rsrvd2= 0;
8075 }
8076 }
8077
8078 /* Make adjustments. */
8079 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
8080 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
8081 iemFpuRecalcExceptionStatus(pDstX87);
8082 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
8083 * exceptions are pending after loading the saved state? */
8084}
8085
8086
8087/**
8088 * Implements 'FNSTENV'.
8089 *
8090 * @param enmEffOpSize The operand size (only REX.W really matters).
8091 * @param iEffSeg The effective segment register for @a GCPtrEff.
8092 * @param GCPtrEffDst The address of the image.
8093 */
8094IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8095{
8096 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8097 RTPTRUNION uPtr;
8098 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8099 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8100 if (rcStrict != VINF_SUCCESS)
8101 return rcStrict;
8102
8103 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8104
8105 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8106 if (rcStrict != VINF_SUCCESS)
8107 return rcStrict;
8108
8109 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8110 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8111 return VINF_SUCCESS;
8112}
8113
8114
8115/**
8116 * Implements 'FNSAVE'.
8117 *
8118 * @param GCPtrEffDst The address of the image.
8119 * @param enmEffOpSize The operand size.
8120 */
8121IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8122{
8123 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8124 RTPTRUNION uPtr;
8125 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8126 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8127 if (rcStrict != VINF_SUCCESS)
8128 return rcStrict;
8129
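    /* The FNSAVE image is the 14-byte (16-bit) or 28-byte (32-bit) environment
       followed by the eight 80-bit ST registers, 94 or 108 bytes in total; the
       FPU is then reinitialized as by FNINIT. */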
8130 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8131 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8132 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8133 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8134 {
8135 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
8136 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
8137 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
8138 }
8139
8140 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8141 if (rcStrict != VINF_SUCCESS)
8142 return rcStrict;
8143
8144 /*
8145 * Re-initialize the FPU context.
8146 */
8147 pFpuCtx->FCW = 0x37f;
8148 pFpuCtx->FSW = 0;
8149 pFpuCtx->FTW = 0x00; /* 0 - empty */
8150 pFpuCtx->FPUDP = 0;
8151 pFpuCtx->DS = 0;
8152 pFpuCtx->Rsrvd2= 0;
8153 pFpuCtx->FPUIP = 0;
8154 pFpuCtx->CS = 0;
8155 pFpuCtx->Rsrvd1= 0;
8156 pFpuCtx->FOP = 0;
8157
8158 iemHlpUsedFpu(pVCpu);
8159 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8160 return VINF_SUCCESS;
8161}
8162
8163
8164
8165/**
8166 * Implements 'FLDENV'.
8167 *
8168 * @param enmEffOpSize The operand size (only REX.W really matters).
8169 * @param iEffSeg The effective segment register for @a GCPtrEff.
8170 * @param GCPtrEffSrc The address of the image.
8171 */
8172IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8173{
8174 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8175 RTCPTRUNION uPtr;
8176 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8177 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8178 if (rcStrict != VINF_SUCCESS)
8179 return rcStrict;
8180
8181 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8182
8183 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8184 if (rcStrict != VINF_SUCCESS)
8185 return rcStrict;
8186
8187 iemHlpUsedFpu(pVCpu);
8188 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8189 return VINF_SUCCESS;
8190}
8191
8192
8193/**
8194 * Implements 'FRSTOR'.
8195 *
8196 * @param GCPtrEffSrc The address of the image.
8197 * @param enmEffOpSize The operand size.
8198 */
8199IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8200{
8201 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8202 RTCPTRUNION uPtr;
8203 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8204 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8205 if (rcStrict != VINF_SUCCESS)
8206 return rcStrict;
8207
8208 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8209 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8210 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8211 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8212 {
8213 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
8214 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
8215 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
8216 pFpuCtx->aRegs[i].au32[3] = 0;
8217 }
8218
8219 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8220 if (rcStrict != VINF_SUCCESS)
8221 return rcStrict;
8222
8223 iemHlpUsedFpu(pVCpu);
8224 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8225 return VINF_SUCCESS;
8226}
8227
8228
8229/**
8230 * Implements 'FLDCW'.
8231 *
8232 * @param u16Fcw The new FCW.
8233 */
8234IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
8235{
8236 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8237
8238 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
8239 /** @todo Testcase: Try see what happens when trying to set undefined bits
8240 * (other than 6 and 7). Currently ignoring them. */
 8241 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
 8242 * according to FSW. (This is what is currently implemented.)
8243 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8244 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
8245 iemFpuRecalcExceptionStatus(pFpuCtx);
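    /* iemFpuRecalcExceptionStatus presumably re-derives FSW.ES (and B) from the
       pending exception bits and the new masks, so unmasking a pending exception
       here surfaces as #MF on the next waiting FPU instruction. */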
8246
8247 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8248 iemHlpUsedFpu(pVCpu);
8249 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8250 return VINF_SUCCESS;
8251}
8252
8253
8254
8255/**
8256 * Implements the underflow case of fxch.
8257 *
8258 * @param iStReg The other stack register.
8259 */
8260IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
8261{
8262 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8263
8264 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8265 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
8266 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8267 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
8268
8269 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
8270 * registers are read as QNaN and then exchanged. This could be
8271 * wrong... */
8272 if (pFpuCtx->FCW & X86_FCW_IM)
8273 {
8274 if (RT_BIT(iReg1) & pFpuCtx->FTW)
8275 {
8276 if (RT_BIT(iReg2) & pFpuCtx->FTW)
8277 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8278 else
8279 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
8280 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
8281 }
8282 else
8283 {
8284 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
8285 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8286 }
8287 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8288 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8289 }
8290 else
8291 {
8292 /* raise underflow exception, don't change anything. */
8293 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
8294 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8295 }
8296
8297 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8298 iemHlpUsedFpu(pVCpu);
8299 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8300 return VINF_SUCCESS;
8301}
8302
8303
8304/**
8305 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
8306 *
 8307 * @param iStReg The other stack register.
8308 */
8309IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
8310{
8311 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8312 Assert(iStReg < 8);
8313
8314 /*
8315 * Raise exceptions.
8316 */
8317 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
8318 return iemRaiseDeviceNotAvailable(pVCpu);
8319
8320 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8321 uint16_t u16Fsw = pFpuCtx->FSW;
8322 if (u16Fsw & X86_FSW_ES)
8323 return iemRaiseMathFault(pVCpu);
8324
8325 /*
8326 * Check if any of the register accesses causes #SF + #IA.
8327 */
8328 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
8329 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8330 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
8331 {
8332 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
8333 NOREF(u32Eflags);
8334
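    /* EFLAGS mapping for FCOMI/FUCOMI: ZF/PF/CF reflect C3/C2/C0, i.e.
       ST0 > ST(i) -> 000, ST0 < ST(i) -> 001, ST0 == ST(i) -> 100,
       unordered -> 111; OF, SF and AF are cleared. */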
8335 pFpuCtx->FSW &= ~X86_FSW_C1;
8336 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
8337 if ( !(u16Fsw & X86_FSW_IE)
8338 || (pFpuCtx->FCW & X86_FCW_IM) )
8339 {
8340 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
 8341 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8342 }
8343 }
8344 else if (pFpuCtx->FCW & X86_FCW_IM)
8345 {
8346 /* Masked underflow. */
8347 pFpuCtx->FSW &= ~X86_FSW_C1;
8348 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8349 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8350 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
8351 }
8352 else
8353 {
8354 /* Raise underflow - don't touch EFLAGS or TOP. */
8355 pFpuCtx->FSW &= ~X86_FSW_C1;
8356 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8357 fPop = false;
8358 }
8359
8360 /*
8361 * Pop if necessary.
8362 */
8363 if (fPop)
8364 {
8365 pFpuCtx->FTW &= ~RT_BIT(iReg1);
8366 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
8367 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
8368 }
8369
8370 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8371 iemHlpUsedFpu(pVCpu);
8372 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8373 return VINF_SUCCESS;
8374}
8375
8376/** @} */
8377