VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@67078

Last change on this file since 67078 was 66848, checked in by vboxsync, 8 years ago

VMM: use RT_LO/HI_U32 at certain places

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 284.3 KB
1/* $Id: IEMAllCImpl.cpp.h 66848 2017-05-09 13:04:57Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @name Misc Helpers
19 * @{
20 */
21
22
23/**
24 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
25 *
26 * @returns Strict VBox status code.
27 *
28 * @param pVCpu The cross context virtual CPU structure of the calling thread.
29 * @param pCtx The register context.
30 * @param u16Port The port number.
31 * @param cbOperand The operand size.
32 */
33static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
34{
35 /* The TSS bits we're interested in are the same on 386 and AMD64. */
36 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
37 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
38 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
39 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
40
41 /*
42 * Check the TSS type, 16-bit TSSes don't have an I/O permission bitmap.
43 */
44 Assert(!pCtx->tr.Attr.n.u1DescType);
45 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
46 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
47 {
48 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
49 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
50 return iemRaiseGeneralProtectionFault0(pVCpu);
51 }
52
53 /*
54 * Read the bitmap offset (may #PF).
55 */
56 uint16_t offBitmap;
57 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
58 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
59 if (rcStrict != VINF_SUCCESS)
60 {
61 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
62 return rcStrict;
63 }
64
65 /*
66 * The check covers the bit range from u16Port to (u16Port + cbOperand - 1).
67 * However, Intel describes the CPU as actually reading two bytes regardless
68 * of whether the bit range crosses a byte boundary, hence the + 1 in the test below.
69 */
70 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
71 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
72 * for instance, sizeof(X86TSS32). */
73 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
74 {
75 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
76 offFirstBit, pCtx->tr.u32Limit));
77 return iemRaiseGeneralProtectionFault0(pVCpu);
78 }
79
80 /*
81 * Read the necessary bits.
82 */
83 /** @todo Test the assertion in the Intel manual that the CPU reads two
84 * bytes. The question is how this works wrt #PF and #GP on the
85 * 2nd byte when it's not required. */
86 uint16_t bmBytes = UINT16_MAX;
87 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
88 if (rcStrict != VINF_SUCCESS)
89 {
90 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
91 return rcStrict;
92 }
93
94 /*
95 * Perform the check.
96 */
97 uint16_t fPortMask = (1 << cbOperand) - 1;
98 bmBytes >>= (u16Port & 7);
99 if (bmBytes & fPortMask)
100 {
101 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
102 u16Port, cbOperand, bmBytes, fPortMask));
103 return iemRaiseGeneralProtectionFault0(pVCpu);
104 }
105
106 return VINF_SUCCESS;
107}
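/*
 * Illustrative sketch (not part of the original file): the same I/O permission
 * bitmap test against a plain byte array, assuming the relevant bitmap bytes
 * have already been fetched from the TSS.  The helper name is hypothetical.
 */
#if 0
static bool iemHlpSketchIoPortAllowed(uint8_t const *pabIoBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    /* Two consecutive bytes are consulted so that accesses straddling a byte
       boundary are covered, mirroring the two-byte read above. */
    uint16_t bmBytes   = pabIoBitmap[u16Port / 8]
                       | ((uint16_t)pabIoBitmap[u16Port / 8 + 1] << 8);
    uint16_t fPortMask = (1 << cbOperand) - 1;        /* one bit per accessed byte */
    return !((bmBytes >> (u16Port & 7)) & fPortMask); /* all covered bits must be clear */
}
#endif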
108
109
110/**
111 * Checks if we are allowed to access the given I/O port, raising the
112 * appropriate exceptions if we aren't (or if the I/O bitmap is not
113 * accessible).
114 *
115 * @returns Strict VBox status code.
116 *
117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
118 * @param pCtx The register context.
119 * @param u16Port The port number.
120 * @param cbOperand The operand size.
121 */
122DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
123{
124 X86EFLAGS Efl;
125 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
126 if ( (pCtx->cr0 & X86_CR0_PE)
127 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
128 || Efl.Bits.u1VM) )
129 return iemHlpCheckPortIOPermissionBitmap(pVCpu, pCtx, u16Port, cbOperand);
130 return VINF_SUCCESS;
131}
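/*
 * Clarifying note (not part of the original file): the bitmap walk above is
 * only performed in protected mode when either CPL > IOPL or the CPU is in
 * V86 mode; in real mode, and whenever CPL <= IOPL outside V86 mode, the
 * access is always permitted.
 */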
132
133
134#if 0
135/**
136 * Calculates the parity bit.
137 *
138 * @returns true if the bit is set, false if not.
139 * @param u8Result The least significant byte of the result.
140 */
141static bool iemHlpCalcParityFlag(uint8_t u8Result)
142{
143 /*
144 * Parity is set if the number of bits in the least significant byte of
145 * the result is even.
146 */
147 uint8_t cBits;
148 cBits = u8Result & 1; /* 0 */
149 u8Result >>= 1;
150 cBits += u8Result & 1;
151 u8Result >>= 1;
152 cBits += u8Result & 1;
153 u8Result >>= 1;
154 cBits += u8Result & 1;
155 u8Result >>= 1;
156 cBits += u8Result & 1; /* 4 */
157 u8Result >>= 1;
158 cBits += u8Result & 1;
159 u8Result >>= 1;
160 cBits += u8Result & 1;
161 u8Result >>= 1;
162 cBits += u8Result & 1;
163 return !(cBits & 1);
164}
165#endif /* not used */
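/*
 * Illustrative alternative (not part of the original file): the same parity
 * computation folded down with XOR; PF is set when the number of set bits is
 * even.  The function name is hypothetical.
 */
#if 0
static bool iemHlpCalcParityFlagXorFold(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;   /* fold bits 7:4 into 3:0 */
    u8Result ^= u8Result >> 2;   /* fold bits 3:2 into 1:0 */
    u8Result ^= u8Result >> 1;   /* fold bit 1 into bit 0  */
    return !(u8Result & 1);      /* bit 0 is now the XOR of all eight bits */
}
#endif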
166
167
168/**
169 * Updates the specified flags according to an 8-bit result.
170 *
171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
172 * @param u8Result The result to set the flags according to.
173 * @param fToUpdate The flags to update.
174 * @param fUndefined The flags that are specified as undefined.
175 */
176static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
177{
178 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
179
180 uint32_t fEFlags = pCtx->eflags.u;
181 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
182 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
183 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
184#ifdef IEM_VERIFICATION_MODE_FULL
185 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
186#endif
187}
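/*
 * Hypothetical usage sketch (not part of the original file): an instruction
 * whose SF/ZF/PF are defined by its 8-bit result but whose OF/AF/CF are left
 * architecturally undefined could update EFLAGS like this.
 */
#if 0
    iemHlpUpdateArithEFlagsU8(pVCpu, u8Result,
                              X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
                              X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
#endif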
188
189
190/**
191 * Updates the specified flags according to a 16-bit result.
192 *
193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
194 * @param u16Result The result to set the flags according to.
195 * @param fToUpdate The flags to update.
196 * @param fUndefined The flags that are specified as undefined.
197 */
198static void iemHlpUpdateArithEFlagsU16(PVMCPU pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
199{
200 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
201
202 uint32_t fEFlags = pCtx->eflags.u;
203 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
204 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
205 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
206#ifdef IEM_VERIFICATION_MODE_FULL
207 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
208#endif
209}
210
211
212/**
213 * Helper used by iret.
214 *
215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
216 * @param uCpl The new CPL.
217 * @param pSReg Pointer to the segment register.
218 */
219static void iemHlpAdjustSelectorForNewCpl(PVMCPU pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
220{
221#ifdef VBOX_WITH_RAW_MODE_NOT_R0
222 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
223 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
224#else
225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
226#endif
227
228 if ( uCpl > pSReg->Attr.n.u2Dpl
229 && pSReg->Attr.n.u1DescType /* code or data, not system */
230 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
231 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
232 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
233}
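/*
 * Clarifying note (not part of the original file): the effect of the helper
 * above is that, on IRET to a less privileged level, a data or non-conforming
 * code selector whose DPL is below the new CPL is replaced by a NULL selector,
 * so the outer-level code cannot keep using a more privileged segment left
 * loaded in DS/ES/FS/GS.
 */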
234
235
236/**
237 * Indicates that we have modified the FPU state.
238 *
239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
240 */
241DECLINLINE(void) iemHlpUsedFpu(PVMCPU pVCpu)
242{
243 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
244}
245
246/** @} */
247
248/** @name C Implementations
249 * @{
250 */
251
252/**
253 * Implements a 16-bit popa.
254 */
255IEM_CIMPL_DEF_0(iemCImpl_popa_16)
256{
257 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
258 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
259 RTGCPTR GCPtrLast = GCPtrStart + 15;
260 VBOXSTRICTRC rcStrict;
261
262 /*
263 * The docs are a bit hard to comprehend here, but it looks like we wrap
264 * around in real mode as long as none of the individual pops crosses the
265 * end of the stack segment. In protected mode we check the whole access
266 * in one go. For efficiency, only do the word-by-word thing if we're in
267 * danger of wrapping around.
268 */
269 /** @todo do popa boundary / wrap-around checks. */
270 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
271 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
272 {
273 /* word-by-word */
274 RTUINT64U TmpRsp;
275 TmpRsp.u = pCtx->rsp;
276 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->di, &TmpRsp);
277 if (rcStrict == VINF_SUCCESS)
278 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->si, &TmpRsp);
279 if (rcStrict == VINF_SUCCESS)
280 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bp, &TmpRsp);
281 if (rcStrict == VINF_SUCCESS)
282 {
283 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
284 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bx, &TmpRsp);
285 }
286 if (rcStrict == VINF_SUCCESS)
287 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->dx, &TmpRsp);
288 if (rcStrict == VINF_SUCCESS)
289 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->cx, &TmpRsp);
290 if (rcStrict == VINF_SUCCESS)
291 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->ax, &TmpRsp);
292 if (rcStrict == VINF_SUCCESS)
293 {
294 pCtx->rsp = TmpRsp.u;
295 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
296 }
297 }
298 else
299 {
300 uint16_t const *pa16Mem = NULL;
301 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
305 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
306 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
307 /* skip sp */
308 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
309 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
310 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
311 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
312 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
313 if (rcStrict == VINF_SUCCESS)
314 {
315 iemRegAddToRsp(pVCpu, pCtx, 16);
316 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
317 }
318 }
319 }
320 return rcStrict;
321}
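/*
 * Clarifying note (not part of the original file): PUSHA stores AX,CX,DX,BX,
 * SP,BP,SI,DI from the highest towards the lowest address, so in the mapped
 * 8-word block above index 0 is DI and index 7 is AX.  Assuming the usual x86
 * register encoding (X86_GREG_xAX=0 ... X86_GREG_xDI=7), 7 - X86_GREG_xXX
 * selects the matching slot; the 32-bit popa and both pusha implementations
 * below use the same indexing.
 */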
322
323
324/**
325 * Implements a 32-bit popa.
326 */
327IEM_CIMPL_DEF_0(iemCImpl_popa_32)
328{
329 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
330 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
331 RTGCPTR GCPtrLast = GCPtrStart + 31;
332 VBOXSTRICTRC rcStrict;
333
334 /*
335 * The docs are a bit hard to comprehend here, but it looks like we wrap
336 * around in real mode as long as none of the individual pops crosses the
337 * end of the stack segment. In protected mode we check the whole access
338 * in one go. For efficiency, only do the word-by-word thing if we're in
339 * danger of wrapping around.
340 */
341 /** @todo do popa boundary / wrap-around checks. */
342 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
343 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
344 {
345 /* word-by-word */
346 RTUINT64U TmpRsp;
347 TmpRsp.u = pCtx->rsp;
348 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edi, &TmpRsp);
349 if (rcStrict == VINF_SUCCESS)
350 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->esi, &TmpRsp);
351 if (rcStrict == VINF_SUCCESS)
352 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebp, &TmpRsp);
353 if (rcStrict == VINF_SUCCESS)
354 {
355 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
356 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebx, &TmpRsp);
357 }
358 if (rcStrict == VINF_SUCCESS)
359 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edx, &TmpRsp);
360 if (rcStrict == VINF_SUCCESS)
361 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ecx, &TmpRsp);
362 if (rcStrict == VINF_SUCCESS)
363 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->eax, &TmpRsp);
364 if (rcStrict == VINF_SUCCESS)
365 {
366#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
367 pCtx->rdi &= UINT32_MAX;
368 pCtx->rsi &= UINT32_MAX;
369 pCtx->rbp &= UINT32_MAX;
370 pCtx->rbx &= UINT32_MAX;
371 pCtx->rdx &= UINT32_MAX;
372 pCtx->rcx &= UINT32_MAX;
373 pCtx->rax &= UINT32_MAX;
374#endif
375 pCtx->rsp = TmpRsp.u;
376 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
377 }
378 }
379 else
380 {
381 uint32_t const *pa32Mem;
382 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
383 if (rcStrict == VINF_SUCCESS)
384 {
385 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
386 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
387 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
388 /* skip esp */
389 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
390 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
391 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
392 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
393 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
394 if (rcStrict == VINF_SUCCESS)
395 {
396 iemRegAddToRsp(pVCpu, pCtx, 32);
397 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
398 }
399 }
400 }
401 return rcStrict;
402}
403
404
405/**
406 * Implements a 16-bit pusha.
407 */
408IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
409{
410 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
411 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
412 RTGCPTR GCPtrBottom = GCPtrTop - 15;
413 VBOXSTRICTRC rcStrict;
414
415 /*
416 * The docs are a bit hard to comprehend here, but it looks like we wrap
417 * around in real mode as long as none of the individual pushes crosses the
418 * end of the stack segment. In protected mode we check the whole access
419 * in one go. For efficiency, only do the word-by-word thing if we're in
420 * danger of wrapping around.
421 */
422 /** @todo do pusha boundary / wrap-around checks. */
423 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
424 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
425 {
426 /* word-by-word */
427 RTUINT64U TmpRsp;
428 TmpRsp.u = pCtx->rsp;
429 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->ax, &TmpRsp);
430 if (rcStrict == VINF_SUCCESS)
431 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->cx, &TmpRsp);
432 if (rcStrict == VINF_SUCCESS)
433 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->dx, &TmpRsp);
434 if (rcStrict == VINF_SUCCESS)
435 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bx, &TmpRsp);
436 if (rcStrict == VINF_SUCCESS)
437 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->sp, &TmpRsp);
438 if (rcStrict == VINF_SUCCESS)
439 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bp, &TmpRsp);
440 if (rcStrict == VINF_SUCCESS)
441 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->si, &TmpRsp);
442 if (rcStrict == VINF_SUCCESS)
443 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->di, &TmpRsp);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 pCtx->rsp = TmpRsp.u;
447 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
448 }
449 }
450 else
451 {
452 GCPtrBottom--;
453 uint16_t *pa16Mem = NULL;
454 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
455 if (rcStrict == VINF_SUCCESS)
456 {
457 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
458 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
459 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
460 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
461 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
462 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
463 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
464 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
465 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
466 if (rcStrict == VINF_SUCCESS)
467 {
468 iemRegSubFromRsp(pVCpu, pCtx, 16);
469 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
470 }
471 }
472 }
473 return rcStrict;
474}
475
476
477/**
478 * Implements a 32-bit pusha.
479 */
480IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
481{
482 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
483 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
484 RTGCPTR GCPtrBottom = GCPtrTop - 31;
485 VBOXSTRICTRC rcStrict;
486
487 /*
488 * The docs are a bit hard to comprehend here, but it looks like we wrap
489 * around in real mode as long as none of the individual pushes crosses the
490 * end of the stack segment. In protected mode we check the whole access
491 * in one go. For efficiency, only do the word-by-word thing if we're in
492 * danger of wrapping around.
493 */
494 /** @todo do pusha boundary / wrap-around checks. */
495 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
496 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
497 {
498 /* word-by-word */
499 RTUINT64U TmpRsp;
500 TmpRsp.u = pCtx->rsp;
501 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->eax, &TmpRsp);
502 if (rcStrict == VINF_SUCCESS)
503 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ecx, &TmpRsp);
504 if (rcStrict == VINF_SUCCESS)
505 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edx, &TmpRsp);
506 if (rcStrict == VINF_SUCCESS)
507 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebx, &TmpRsp);
508 if (rcStrict == VINF_SUCCESS)
509 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esp, &TmpRsp);
510 if (rcStrict == VINF_SUCCESS)
511 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebp, &TmpRsp);
512 if (rcStrict == VINF_SUCCESS)
513 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esi, &TmpRsp);
514 if (rcStrict == VINF_SUCCESS)
515 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edi, &TmpRsp);
516 if (rcStrict == VINF_SUCCESS)
517 {
518 pCtx->rsp = TmpRsp.u;
519 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
520 }
521 }
522 else
523 {
524 GCPtrBottom--;
525 uint32_t *pa32Mem;
526 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
527 if (rcStrict == VINF_SUCCESS)
528 {
529 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
530 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
531 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
532 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
533 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
534 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
535 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
536 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
537 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
538 if (rcStrict == VINF_SUCCESS)
539 {
540 iemRegSubFromRsp(pVCpu, pCtx, 32);
541 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
542 }
543 }
544 }
545 return rcStrict;
546}
547
548
549/**
550 * Implements pushf.
551 *
552 *
553 * @param enmEffOpSize The effective operand size.
554 */
555IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
556{
557 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
558 VBOXSTRICTRC rcStrict;
559
560 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
561 {
562 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
563 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
564 }
565
566 /*
567 * If we're in V8086 mode some care is required (which is why we're
568 * doing this in a C implementation).
569 */
570 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
571 if ( (fEfl & X86_EFL_VM)
572 && X86_EFL_GET_IOPL(fEfl) != 3 )
573 {
574 Assert(pCtx->cr0 & X86_CR0_PE);
575 if ( enmEffOpSize != IEMMODE_16BIT
576 || !(pCtx->cr4 & X86_CR4_VME))
577 return iemRaiseGeneralProtectionFault0(pVCpu);
578 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
579 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
580 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
581 }
582 else
583 {
584
585 /*
586 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
587 */
588 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
589
590 switch (enmEffOpSize)
591 {
592 case IEMMODE_16BIT:
593 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
594 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
595 fEfl |= UINT16_C(0xf000);
596 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
597 break;
598 case IEMMODE_32BIT:
599 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
600 break;
601 case IEMMODE_64BIT:
602 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
603 break;
604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
605 }
606 }
607 if (rcStrict != VINF_SUCCESS)
608 return rcStrict;
609
610 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
611 return VINF_SUCCESS;
612}
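/*
 * Clarifying note (not part of the original file): in the V8086 + VME path
 * above, the real IF (bit 9) is masked out and the virtual interrupt flag VIF
 * (bit 19) is shifted down by (19 - 9) bits, so the guest sees its VIF in the
 * IF position of the 16-bit flags image that gets pushed.
 */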
613
614
615/**
616 * Implements popf.
617 *
618 * @param enmEffOpSize The effective operand size.
619 */
620IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
621{
622 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
623 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu, pCtx);
624 VBOXSTRICTRC rcStrict;
625 uint32_t fEflNew;
626
627 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
628 {
629 Log2(("popf: Guest intercept -> #VMEXIT\n"));
630 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
631 }
632
633 /*
634 * V8086 is special as usual.
635 */
636 if (fEflOld & X86_EFL_VM)
637 {
638 /*
639 * Almost anything goes if IOPL is 3.
640 */
641 if (X86_EFL_GET_IOPL(fEflOld) == 3)
642 {
643 switch (enmEffOpSize)
644 {
645 case IEMMODE_16BIT:
646 {
647 uint16_t u16Value;
648 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
649 if (rcStrict != VINF_SUCCESS)
650 return rcStrict;
651 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
652 break;
653 }
654 case IEMMODE_32BIT:
655 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
656 if (rcStrict != VINF_SUCCESS)
657 return rcStrict;
658 break;
659 IEM_NOT_REACHED_DEFAULT_CASE_RET();
660 }
661
662 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
663 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
664 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
665 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
666 }
667 /*
668 * Interrupt flag virtualization with CR4.VME=1.
669 */
670 else if ( enmEffOpSize == IEMMODE_16BIT
671 && (pCtx->cr4 & X86_CR4_VME) )
672 {
673 uint16_t u16Value;
674 RTUINT64U TmpRsp;
675 TmpRsp.u = pCtx->rsp;
676 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
677 if (rcStrict != VINF_SUCCESS)
678 return rcStrict;
679
680 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
681 * or before? */
682 if ( ( (u16Value & X86_EFL_IF)
683 && (fEflOld & X86_EFL_VIP))
684 || (u16Value & X86_EFL_TF) )
685 return iemRaiseGeneralProtectionFault0(pVCpu);
686
687 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
688 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
689 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
690 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
691
692 pCtx->rsp = TmpRsp.u;
693 }
694 else
695 return iemRaiseGeneralProtectionFault0(pVCpu);
696
697 }
698 /*
699 * Not in V8086 mode.
700 */
701 else
702 {
703 /* Pop the flags. */
704 switch (enmEffOpSize)
705 {
706 case IEMMODE_16BIT:
707 {
708 uint16_t u16Value;
709 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
710 if (rcStrict != VINF_SUCCESS)
711 return rcStrict;
712 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
713
714 /*
715 * Ancient CPU adjustments:
716 * - 8086, 80186, V20/30:
717 * Fixed bits 15:12 are not kept correctly internally, mostly for
718 * practical reasons (masking below). We add them when pushing flags.
719 * - 80286:
720 * The NT and IOPL flags cannot be popped from real mode and are
721 * therefore always zero (since a 286 can never exit from PM and
722 * their initial value is zero). This changed on a 386 and can
723 * therefore be used to detect 286 or 386 CPU in real mode.
724 */
725 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
726 && !(pCtx->cr0 & X86_CR0_PE) )
727 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
728 break;
729 }
730 case IEMMODE_32BIT:
731 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
732 if (rcStrict != VINF_SUCCESS)
733 return rcStrict;
734 break;
735 case IEMMODE_64BIT:
736 {
737 uint64_t u64Value;
738 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
739 if (rcStrict != VINF_SUCCESS)
740 return rcStrict;
741 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
742 break;
743 }
744 IEM_NOT_REACHED_DEFAULT_CASE_RET();
745 }
746
747 /* Merge them with the current flags. */
748 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
749 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
750 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
751 || pVCpu->iem.s.uCpl == 0)
752 {
753 fEflNew &= fPopfBits;
754 fEflNew |= ~fPopfBits & fEflOld;
755 }
756 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
757 {
758 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
759 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
760 }
761 else
762 {
763 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
764 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
765 }
766 }
767
768 /*
769 * Commit the flags.
770 */
771 Assert(fEflNew & RT_BIT_32(1));
772 IEMMISC_SET_EFL(pVCpu, pCtx, fEflNew);
773 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
774
775 return VINF_SUCCESS;
776}
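/*
 * Illustrative sketch (not part of the original file) of the non-V86 merge
 * rules applied above, assuming fPopfBits has already been selected for the
 * CPU generation: CPL 0 may change IOPL and IF, CPL <= IOPL may change IF but
 * not IOPL, anything else may change neither.  The helper name is hypothetical.
 */
#if 0
static uint32_t iemHlpSketchPopfMerge(uint32_t fEflOld, uint32_t fEflPopped, uint32_t fPopfBits, uint8_t uCpl)
{
    uint32_t fMask = fPopfBits;
    if (uCpl != 0)
        fMask &= ~X86_EFL_IOPL;                 /* only ring 0 may change IOPL */
    if (uCpl > X86_EFL_GET_IOPL(fEflOld))
        fMask &= ~X86_EFL_IF;                   /* changing IF requires CPL <= IOPL */
    return (fEflPopped & fMask) | (fEflOld & ~fMask);
}
#endif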
777
778
779/**
780 * Implements a 16-bit indirect call.
781 *
782 * @param uNewPC The new program counter (RIP) value (loaded from the
783 * operand).
784 * @param enmEffOpSize The effective operand size.
785 */
786IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
787{
788 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
789 uint16_t uOldPC = pCtx->ip + cbInstr;
790 if (uNewPC > pCtx->cs.u32Limit)
791 return iemRaiseGeneralProtectionFault0(pVCpu);
792
793 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
794 if (rcStrict != VINF_SUCCESS)
795 return rcStrict;
796
797 pCtx->rip = uNewPC;
798 pCtx->eflags.Bits.u1RF = 0;
799
800#ifndef IEM_WITH_CODE_TLB
801 /* Flush the prefetch buffer. */
802 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
803#endif
804 return VINF_SUCCESS;
805}
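/*
 * Clarifying note (not part of the original file): setting cbOpcode to
 * offOpcode discards the remaining prefetched opcode bytes, so in the
 * non-code-TLB configuration the next instruction is fetched fresh from the
 * new RIP; the code-TLB build flushes by clearing pbInstrBuf instead (see the
 * call gate code further down).
 */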
806
807
808/**
809 * Implements a 16-bit relative call.
810 *
811 * @param offDisp The displacement offset.
812 */
813IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
814{
815 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
816 uint16_t uOldPC = pCtx->ip + cbInstr;
817 uint16_t uNewPC = uOldPC + offDisp;
818 if (uNewPC > pCtx->cs.u32Limit)
819 return iemRaiseGeneralProtectionFault0(pVCpu);
820
821 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
822 if (rcStrict != VINF_SUCCESS)
823 return rcStrict;
824
825 pCtx->rip = uNewPC;
826 pCtx->eflags.Bits.u1RF = 0;
827
828#ifndef IEM_WITH_CODE_TLB
829 /* Flush the prefetch buffer. */
830 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
831#endif
832 return VINF_SUCCESS;
833}
834
835
836/**
837 * Implements a 32-bit indirect call.
838 *
839 * @param uNewPC The new program counter (RIP) value (loaded from the
840 * operand).
841 * @param enmEffOpSize The effective operand size.
842 */
843IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
844{
845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
846 uint32_t uOldPC = pCtx->eip + cbInstr;
847 if (uNewPC > pCtx->cs.u32Limit)
848 return iemRaiseGeneralProtectionFault0(pVCpu);
849
850 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
851 if (rcStrict != VINF_SUCCESS)
852 return rcStrict;
853
854#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
855 /*
856 * CSAM hook for recording interesting indirect calls.
857 */
858 if ( !pCtx->eflags.Bits.u1IF
859 && (pCtx->cr0 & X86_CR0_PG)
860 && !CSAMIsEnabled(pVCpu->CTX_SUFF(pVM))
861 && pVCpu->iem.s.uCpl == 0)
862 {
863 EMSTATE enmState = EMGetState(pVCpu);
864 if ( enmState == EMSTATE_IEM_THEN_REM
865 || enmState == EMSTATE_IEM
866 || enmState == EMSTATE_REM)
867 CSAMR3RecordCallAddress(pVCpu->CTX_SUFF(pVM), pCtx->eip);
868 }
869#endif
870
871 pCtx->rip = uNewPC;
872 pCtx->eflags.Bits.u1RF = 0;
873
874#ifndef IEM_WITH_CODE_TLB
875 /* Flush the prefetch buffer. */
876 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
877#endif
878 return VINF_SUCCESS;
879}
880
881
882/**
883 * Implements a 32-bit relative call.
884 *
885 * @param offDisp The displacement offset.
886 */
887IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
888{
889 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
890 uint32_t uOldPC = pCtx->eip + cbInstr;
891 uint32_t uNewPC = uOldPC + offDisp;
892 if (uNewPC > pCtx->cs.u32Limit)
893 return iemRaiseGeneralProtectionFault0(pVCpu);
894
895 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
896 if (rcStrict != VINF_SUCCESS)
897 return rcStrict;
898
899 pCtx->rip = uNewPC;
900 pCtx->eflags.Bits.u1RF = 0;
901
902#ifndef IEM_WITH_CODE_TLB
903 /* Flush the prefetch buffer. */
904 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
905#endif
906 return VINF_SUCCESS;
907}
908
909
910/**
911 * Implements a 64-bit indirect call.
912 *
913 * @param uNewPC The new program counter (RIP) value (loaded from the
914 * operand).
915 * @param enmEffOpSize The effective operand size.
916 */
917IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
918{
919 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
920 uint64_t uOldPC = pCtx->rip + cbInstr;
921 if (!IEM_IS_CANONICAL(uNewPC))
922 return iemRaiseGeneralProtectionFault0(pVCpu);
923
924 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
925 if (rcStrict != VINF_SUCCESS)
926 return rcStrict;
927
928 pCtx->rip = uNewPC;
929 pCtx->eflags.Bits.u1RF = 0;
930
931#ifndef IEM_WITH_CODE_TLB
932 /* Flush the prefetch buffer. */
933 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
934#endif
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * Implements a 64-bit relative call.
941 *
942 * @param offDisp The displacement offset.
943 */
944IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
945{
946 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
947 uint64_t uOldPC = pCtx->rip + cbInstr;
948 uint64_t uNewPC = uOldPC + offDisp;
949 if (!IEM_IS_CANONICAL(uNewPC))
950 return iemRaiseNotCanonical(pVCpu);
951
952 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
953 if (rcStrict != VINF_SUCCESS)
954 return rcStrict;
955
956 pCtx->rip = uNewPC;
957 pCtx->eflags.Bits.u1RF = 0;
958
959#ifndef IEM_WITH_CODE_TLB
960 /* Flush the prefetch buffer. */
961 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
962#endif
963
964 return VINF_SUCCESS;
965}
966
967
968/**
969 * Implements far jumps and calls thru task segments (TSS).
970 *
971 * @param uSel The selector.
972 * @param enmBranch The kind of branching we're performing.
973 * @param enmEffOpSize The effective operand size.
974 * @param pDesc The descriptor corresponding to @a uSel. The type is
975 * task segment.
976 */
977IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
978{
979#ifndef IEM_IMPLEMENTS_TASKSWITCH
980 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
981#else
982 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
983 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
984 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
985 RT_NOREF_PV(enmEffOpSize);
986
987 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
988 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
989 {
990 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
991 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
992 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
993 }
994
995 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
996 * far calls (see iemCImpl_callf). Most likely in both cases it should be
997 * checked here, need testcases. */
998 if (!pDesc->Legacy.Gen.u1Present)
999 {
1000 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1001 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1002 }
1003
1004 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1005 uint32_t uNextEip = pCtx->eip + cbInstr;
1006 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1007 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1008#endif
1009}
1010
1011
1012/**
1013 * Implements far jumps and calls thru task gates.
1014 *
1015 * @param uSel The selector.
1016 * @param enmBranch The kind of branching we're performing.
1017 * @param enmEffOpSize The effective operand size.
1018 * @param pDesc The descriptor corresponding to @a uSel. The type is
1019 * task gate.
1020 */
1021IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1022{
1023#ifndef IEM_IMPLEMENTS_TASKSWITCH
1024 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1025#else
1026 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1027 RT_NOREF_PV(enmEffOpSize);
1028
1029 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1030 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1031 {
1032 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1033 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1034 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1035 }
1036
1037 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1038 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1039 * checked here, need testcases. */
1040 if (!pDesc->Legacy.Gen.u1Present)
1041 {
1042 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1043 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1044 }
1045
1046 /*
1047 * Fetch the new TSS descriptor from the GDT.
1048 */
1049 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1050 if (uSelTss & X86_SEL_LDT)
1051 {
1052 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1053 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1054 }
1055
1056 IEMSELDESC TssDesc;
1057 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1058 if (rcStrict != VINF_SUCCESS)
1059 return rcStrict;
1060
1061 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1062 {
1063 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1064 TssDesc.Legacy.Gate.u4Type));
1065 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1066 }
1067
1068 if (!TssDesc.Legacy.Gate.u1Present)
1069 {
1070 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1071 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1072 }
1073
1074 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1075 uint32_t uNextEip = pCtx->eip + cbInstr;
1076 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1077 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1078#endif
1079}
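/*
 * Clarifying note (not part of the original file): a task gate adds one level
 * of indirection - the selector stored in the gate is looked up in the GDT
 * (it must not reference the LDT) to find the actual TSS descriptor, which in
 * turn must be available (not busy) and present before the task switch above
 * can proceed.
 */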
1080
1081
1082/**
1083 * Implements far jumps and calls thru call gates.
1084 *
1085 * @param uSel The selector.
1086 * @param enmBranch The kind of branching we're performing.
1087 * @param enmEffOpSize The effective operand size.
1088 * @param pDesc The descriptor corresponding to @a uSel. The type is
1089 * call gate.
1090 */
1091IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1092{
1093#define IEM_IMPLEMENTS_CALLGATE
1094#ifndef IEM_IMPLEMENTS_CALLGATE
1095 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1096#else
1097 RT_NOREF_PV(enmEffOpSize);
1098
1099 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1100 * inter-privilege calls and are much more complex.
1101 *
1102 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1103 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1104 * must be 16-bit or 32-bit.
1105 */
1106 /** @todo: effective operand size is probably irrelevant here, only the
1107 * call gate bitness matters??
1108 */
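 /*
  * Clarifying note (not part of the original file): for an inter-privilege far
  * call thru a 32-bit call gate, the frame built on the new stack below is,
  * from higher to lower addresses: old SS, old ESP, the u5ParmCount parameter
  * dwords copied from the old stack, old CS, old EIP.  16-bit gates use words
  * instead of dwords, and 64-bit gates copy no parameters at all, pushing only
  * old SS, RSP, CS and RIP as qwords.
  */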
1109 VBOXSTRICTRC rcStrict;
1110 RTPTRUNION uPtrRet;
1111 uint64_t uNewRsp;
1112 uint64_t uNewRip;
1113 uint64_t u64Base;
1114 uint32_t cbLimit;
1115 RTSEL uNewCS;
1116 IEMSELDESC DescCS;
1117
1118 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1119 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1120 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1121 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1122
1123 /* Determine the new instruction pointer from the gate descriptor. */
1124 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1125 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1126 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1127
1128 /* Perform DPL checks on the gate descriptor. */
1129 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1130 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1131 {
1132 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1133 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1134 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1135 }
1136
1137 /** @todo does this catch NULL selectors, too? */
1138 if (!pDesc->Legacy.Gen.u1Present)
1139 {
1140 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1141 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1142 }
1143
1144 /*
1145 * Fetch the target CS descriptor from the GDT or LDT.
1146 */
1147 uNewCS = pDesc->Legacy.Gate.u16Sel;
1148 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1149 if (rcStrict != VINF_SUCCESS)
1150 return rcStrict;
1151
1152 /* Target CS must be a code selector. */
1153 if ( !DescCS.Legacy.Gen.u1DescType
1154 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1155 {
1156 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1157 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1158 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1159 }
1160
1161 /* Privilege checks on target CS. */
1162 if (enmBranch == IEMBRANCH_JUMP)
1163 {
1164 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1165 {
1166 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1167 {
1168 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1169 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1170 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1171 }
1172 }
1173 else
1174 {
1175 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1176 {
1177 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1178 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1179 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1180 }
1181 }
1182 }
1183 else
1184 {
1185 Assert(enmBranch == IEMBRANCH_CALL);
1186 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1187 {
1188 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1189 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1190 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1191 }
1192 }
1193
1194 /* Additional long mode checks. */
1195 if (IEM_IS_LONG_MODE(pVCpu))
1196 {
1197 if (!DescCS.Legacy.Gen.u1Long)
1198 {
1199 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1200 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1201 }
1202
1203 /* L vs D. */
1204 if ( DescCS.Legacy.Gen.u1Long
1205 && DescCS.Legacy.Gen.u1DefBig)
1206 {
1207 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1208 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1209 }
1210 }
1211
1212 if (!DescCS.Legacy.Gate.u1Present)
1213 {
1214 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1215 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1216 }
1217
1218 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1219
1220 if (enmBranch == IEMBRANCH_JUMP)
1221 {
1222 /** @todo: This is very similar to regular far jumps; merge! */
1223 /* Jumps are fairly simple... */
1224
1225 /* Chop the high bits off if 16-bit gate (Intel says so). */
1226 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1227 uNewRip = (uint16_t)uNewRip;
1228
1229 /* Limit check for non-long segments. */
1230 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1231 if (DescCS.Legacy.Gen.u1Long)
1232 u64Base = 0;
1233 else
1234 {
1235 if (uNewRip > cbLimit)
1236 {
1237 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1238 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1239 }
1240 u64Base = X86DESC_BASE(&DescCS.Legacy);
1241 }
1242
1243 /* Canonical address check. */
1244 if (!IEM_IS_CANONICAL(uNewRip))
1245 {
1246 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1247 return iemRaiseNotCanonical(pVCpu);
1248 }
1249
1250 /*
1251 * Ok, everything checked out fine. Now set the accessed bit before
1252 * committing the result into CS, CSHID and RIP.
1253 */
1254 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1255 {
1256 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1257 if (rcStrict != VINF_SUCCESS)
1258 return rcStrict;
1259 /** @todo check what VT-x and AMD-V does. */
1260 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1261 }
1262
1263 /* commit */
1264 pCtx->rip = uNewRip;
1265 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1266 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1267 pCtx->cs.ValidSel = pCtx->cs.Sel;
1268 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1269 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1270 pCtx->cs.u32Limit = cbLimit;
1271 pCtx->cs.u64Base = u64Base;
1272 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1273 }
1274 else
1275 {
1276 Assert(enmBranch == IEMBRANCH_CALL);
1277 /* Calls are much more complicated. */
1278
1279 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1280 {
1281 uint16_t offNewStack; /* Offset of new stack in TSS. */
1282 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1283 uint8_t uNewCSDpl;
1284 uint8_t cbWords;
1285 RTSEL uNewSS;
1286 RTSEL uOldSS;
1287 uint64_t uOldRsp;
1288 IEMSELDESC DescSS;
1289 RTPTRUNION uPtrTSS;
1290 RTGCPTR GCPtrTSS;
1291 RTPTRUNION uPtrParmWds;
1292 RTGCPTR GCPtrParmWds;
1293
1294 /* More privilege. This is the fun part. */
1295 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1296
1297 /*
1298 * Determine new SS:rSP from the TSS.
1299 */
1300 Assert(!pCtx->tr.Attr.n.u1DescType);
1301
1302 /* Figure out where the new stack pointer is stored in the TSS. */
1303 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1304 if (!IEM_IS_LONG_MODE(pVCpu))
1305 {
1306 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1307 {
1308 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1309 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1310 }
1311 else
1312 {
1313 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1314 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1315 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1316 }
1317 }
1318 else
1319 {
1320 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1321 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1322 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1323 }
1324
1325 /* Check against TSS limit. */
1326 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1327 {
1328 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1329 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pCtx->tr.Sel);
1330 }
1331
1332 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1333 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1334 if (rcStrict != VINF_SUCCESS)
1335 {
1336 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1337 return rcStrict;
1338 }
1339
1340 if (!IEM_IS_LONG_MODE(pVCpu))
1341 {
1342 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1343 {
1344 uNewRsp = uPtrTSS.pu32[0];
1345 uNewSS = uPtrTSS.pu16[2];
1346 }
1347 else
1348 {
1349 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1350 uNewRsp = uPtrTSS.pu16[0];
1351 uNewSS = uPtrTSS.pu16[1];
1352 }
1353 }
1354 else
1355 {
1356 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1357 /* SS will be a NULL selector, but that's valid. */
1358 uNewRsp = uPtrTSS.pu64[0];
1359 uNewSS = uNewCSDpl;
1360 }
1361
1362 /* Done with the TSS now. */
1363 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1364 if (rcStrict != VINF_SUCCESS)
1365 {
1366 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1367 return rcStrict;
1368 }
1369
1370 /* Only used outside of long mode. */
1371 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1372
1373 /* If EFER.LMA is 0, there's extra work to do. */
1374 if (!IEM_IS_LONG_MODE(pVCpu))
1375 {
1376 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1377 {
1378 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1379 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1380 }
1381
1382 /* Grab the new SS descriptor. */
1383 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1384 if (rcStrict != VINF_SUCCESS)
1385 return rcStrict;
1386
1387 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1388 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1389 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1390 {
1391 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1392 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1393 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1394 }
1395
1396 /* Ensure new SS is a writable data segment. */
1397 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1398 {
1399 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1400 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1401 }
1402
1403 if (!DescSS.Legacy.Gen.u1Present)
1404 {
1405 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1406 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1407 }
1408 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1409 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1410 else
1411 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1412 }
1413 else
1414 {
1415 /* Just grab the new (NULL) SS descriptor. */
1416 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1417 * like we do... */
1418 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1419 if (rcStrict != VINF_SUCCESS)
1420 return rcStrict;
1421
1422 cbNewStack = sizeof(uint64_t) * 4;
1423 }
1424
1425 /** @todo: According to Intel, new stack is checked for enough space first,
1426 * then switched. According to AMD, the stack is switched first and
1427 * then pushes might fault!
1428 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1429 * incoming stack #PF happens before actual stack switch. AMD is
1430 * either lying or implicitly assumes that new state is committed
1431 * only if and when an instruction doesn't fault.
1432 */
1433
1434 /** @todo: According to AMD, CS is loaded first, then SS.
1435 * According to Intel, it's the other way around!?
1436 */
1437
1438 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1439
1440 /* Set the accessed bit before committing new SS. */
1441 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1442 {
1443 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1444 if (rcStrict != VINF_SUCCESS)
1445 return rcStrict;
1446 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1447 }
1448
1449 /* Remember the old SS:rSP and their linear address. */
1450 uOldSS = pCtx->ss.Sel;
1451 uOldRsp = pCtx->ss.Attr.n.u1DefBig ? pCtx->rsp : pCtx->sp;
1452
1453 GCPtrParmWds = pCtx->ss.u64Base + uOldRsp;
1454
1455 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1456 or #PF, the former is not implemented in this workaround. */
1457 /** @todo Properly fix callgate target stack exceptions. */
1458 /** @todo testcase: Cover callgates with partially or fully inaccessible
1459 * target stacks. */
1460 void *pvNewFrame;
1461 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1462 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW);
1463 if (rcStrict != VINF_SUCCESS)
1464 {
1465 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1466 return rcStrict;
1467 }
1468 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1469 if (rcStrict != VINF_SUCCESS)
1470 {
1471 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1472 return rcStrict;
1473 }
1474
1475 /* Commit new SS:rSP. */
1476 pCtx->ss.Sel = uNewSS;
1477 pCtx->ss.ValidSel = uNewSS;
1478 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1479 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1480 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1481 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1482 pCtx->rsp = uNewRsp;
1483 pVCpu->iem.s.uCpl = uNewCSDpl;
1484 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1485 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1486
1487 /* At this point the stack access must not fail because new state was already committed. */
1488 /** @todo this can still fail due to SS.LIMIT not being checked. */
1489 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1490 &uPtrRet.pv, &uNewRsp);
1491 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1492 VERR_INTERNAL_ERROR_5);
1493
1494 if (!IEM_IS_LONG_MODE(pVCpu))
1495 {
1496 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1497 {
1498 /* Push the old CS:rIP. */
1499 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1500 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1501
1502 if (cbWords)
1503 {
1504 /* Map the relevant chunk of the old stack. */
1505 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1506 if (rcStrict != VINF_SUCCESS)
1507 {
1508 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1509 return rcStrict;
1510 }
1511
1512 /* Copy the parameter (d)words. */
1513 for (int i = 0; i < cbWords; ++i)
1514 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1515
1516 /* Unmap the old stack. */
1517 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1518 if (rcStrict != VINF_SUCCESS)
1519 {
1520 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1521 return rcStrict;
1522 }
1523 }
1524
1525 /* Push the old SS:rSP. */
1526 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1527 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1528 }
1529 else
1530 {
1531 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1532
1533 /* Push the old CS:rIP. */
1534 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1535 uPtrRet.pu16[1] = pCtx->cs.Sel;
1536
1537 if (cbWords)
1538 {
1539 /* Map the relevant chunk of the old stack. */
1540 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1541 if (rcStrict != VINF_SUCCESS)
1542 {
1543 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1544 return rcStrict;
1545 }
1546
1547 /* Copy the parameter words. */
1548 for (int i = 0; i < cbWords; ++i)
1549 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1550
1551 /* Unmap the old stack. */
1552 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1553 if (rcStrict != VINF_SUCCESS)
1554 {
1555 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1556 return rcStrict;
1557 }
1558 }
1559
1560 /* Push the old SS:rSP. */
1561 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1562 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1563 }
1564 }
1565 else
1566 {
1567 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1568
1569 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1570 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1571 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1572 uPtrRet.pu64[2] = uOldRsp;
1573 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1574 }
1575
1576 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1577 if (rcStrict != VINF_SUCCESS)
1578 {
1579 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1580 return rcStrict;
1581 }
1582
1583 /* Chop the high bits off if 16-bit gate (Intel says so). */
1584 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1585 uNewRip = (uint16_t)uNewRip;
1586
1587 /* Limit / canonical check. */
1588 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1589 if (!IEM_IS_LONG_MODE(pVCpu))
1590 {
1591 if (uNewRip > cbLimit)
1592 {
1593 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1594 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1595 }
1596 u64Base = X86DESC_BASE(&DescCS.Legacy);
1597 }
1598 else
1599 {
1600 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1601 if (!IEM_IS_CANONICAL(uNewRip))
1602 {
1603 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1604 return iemRaiseNotCanonical(pVCpu);
1605 }
1606 u64Base = 0;
1607 }
1608
1609 /*
1610 * Now set the accessed bit before committing the result into CS, CSHID
1611 * and RIP.  (The return address has already been written to the new
1612 * stack at this point.)
1613 */
1614 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1615 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1616 {
1617 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1618 if (rcStrict != VINF_SUCCESS)
1619 return rcStrict;
1620 /** @todo check what VT-x and AMD-V does. */
1621 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1622 }
1623
1624 /* Commit new CS:rIP. */
1625 pCtx->rip = uNewRip;
1626 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1627 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1628 pCtx->cs.ValidSel = pCtx->cs.Sel;
1629 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1630 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1631 pCtx->cs.u32Limit = cbLimit;
1632 pCtx->cs.u64Base = u64Base;
1633 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1634 }
1635 else
1636 {
1637 /* Same privilege. */
1638 /** @todo: This is very similar to regular far calls; merge! */
1639
1640 /* Check stack first - may #SS(0). */
1641 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1642 * 16-bit code cause a two or four byte CS to be pushed? */
1643 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1644 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1645 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1646 &uPtrRet.pv, &uNewRsp);
1647 if (rcStrict != VINF_SUCCESS)
1648 return rcStrict;
1649
1650 /* Chop the high bits off if 16-bit gate (Intel says so). */
1651 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1652 uNewRip = (uint16_t)uNewRip;
1653
1654 /* Limit / canonical check. */
1655 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1656 if (!IEM_IS_LONG_MODE(pVCpu))
1657 {
1658 if (uNewRip > cbLimit)
1659 {
1660 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1661 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1662 }
1663 u64Base = X86DESC_BASE(&DescCS.Legacy);
1664 }
1665 else
1666 {
1667 if (!IEM_IS_CANONICAL(uNewRip))
1668 {
1669 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1670 return iemRaiseNotCanonical(pVCpu);
1671 }
1672 u64Base = 0;
1673 }
1674
1675 /*
1676 * Now set the accessed bit before
1677 * writing the return address to the stack and committing the result into
1678 * CS, CSHID and RIP.
1679 */
1680 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1681 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1682 {
1683 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1684 if (rcStrict != VINF_SUCCESS)
1685 return rcStrict;
1686 /** @todo check what VT-x and AMD-V does. */
1687 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1688 }
1689
1690 /* stack */
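             /* The frame written below mirrors the iemMemStackPushBeginSpecial size
                above: long mode pushes RIP+CS (8+8), a 386 gate EIP+CS (4+4) and a
                286 gate IP+CS (2+2); what lands in the unused high CS bytes is
                still an open testcase (see the todos below). */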
1691 if (!IEM_IS_LONG_MODE(pVCpu))
1692 {
1693 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1694 {
1695 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1696 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1697 }
1698 else
1699 {
1700 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1701 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1702 uPtrRet.pu16[1] = pCtx->cs.Sel;
1703 }
1704 }
1705 else
1706 {
1707 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1708 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1709 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1710 }
1711
1712 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1713 if (rcStrict != VINF_SUCCESS)
1714 return rcStrict;
1715
1716 /* commit */
1717 pCtx->rip = uNewRip;
1718 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1719 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1720 pCtx->cs.ValidSel = pCtx->cs.Sel;
1721 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1722 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1723 pCtx->cs.u32Limit = cbLimit;
1724 pCtx->cs.u64Base = u64Base;
1725 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1726 }
1727 }
1728 pCtx->eflags.Bits.u1RF = 0;
1729
1730 /* Flush the prefetch buffer. */
1731# ifdef IEM_WITH_CODE_TLB
1732 pVCpu->iem.s.pbInstrBuf = NULL;
1733# else
1734 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1735# endif
1736 return VINF_SUCCESS;
1737#endif
1738}
1739
1740
1741/**
1742 * Implements far jumps and calls thru system selectors.
1743 *
1744 * @param uSel The selector.
1745 * @param enmBranch The kind of branching we're performing.
1746 * @param enmEffOpSize The effective operand size.
1747 * @param pDesc The descriptor corresponding to @a uSel.
1748 */
1749IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1750{
1751 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1752 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1753
1754 if (IEM_IS_LONG_MODE(pVCpu))
1755 switch (pDesc->Legacy.Gen.u4Type)
1756 {
1757 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1758 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1759
1760 default:
1761 case AMD64_SEL_TYPE_SYS_LDT:
1762 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1763 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1764 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1765 case AMD64_SEL_TYPE_SYS_INT_GATE:
1766 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1767 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1768 }
1769
1770 switch (pDesc->Legacy.Gen.u4Type)
1771 {
1772 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1773 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1774 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1775
1776 case X86_SEL_TYPE_SYS_TASK_GATE:
1777 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1778
1779 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1780 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1781 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1782
1783 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1784 Log(("branch %04x -> busy 286 TSS\n", uSel));
1785 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1786
1787 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1788 Log(("branch %04x -> busy 386 TSS\n", uSel));
1789 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1790
1791 default:
1792 case X86_SEL_TYPE_SYS_LDT:
1793 case X86_SEL_TYPE_SYS_286_INT_GATE:
1794 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1795 case X86_SEL_TYPE_SYS_386_INT_GATE:
1796 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1797 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1798 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1799 }
1800}
1801
1802
1803/**
1804 * Implements far jumps.
1805 *
1806 * @param uSel The selector.
1807 * @param offSeg The segment offset.
1808 * @param enmEffOpSize The effective operand size.
1809 */
1810IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1811{
1812 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1813 NOREF(cbInstr);
1814 Assert(offSeg <= UINT32_MAX);
1815
1816 /*
1817 * Real mode and V8086 mode are easy. The only snag seems to be that
1818 * CS.limit doesn't change and the limit check is done against the current
1819 * limit.
1820 */
1821 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
1822 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
1823 {
1824 if (offSeg > pCtx->cs.u32Limit)
1825 {
1826 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1827 return iemRaiseGeneralProtectionFault0(pVCpu);
1828 }
1829
1830 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1831 pCtx->rip = offSeg;
1832 else
1833 pCtx->rip = offSeg & UINT16_MAX;
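        /* Real-mode far branch as implemented below: CS is loaded with the raw
           selector and its hidden base becomes selector * 16 (e.g. jmp far
           0xF000:0x1234 gives CS.base=0xF0000, RIP=0x1234), while the cached
           limit and attributes are left untouched as noted above. */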
1834 pCtx->cs.Sel = uSel;
1835 pCtx->cs.ValidSel = uSel;
1836 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1837 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1838 pCtx->eflags.Bits.u1RF = 0;
1839 return VINF_SUCCESS;
1840 }
1841
1842 /*
1843 * Protected mode. Need to parse the specified descriptor...
1844 */
1845 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1846 {
1847 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1848 return iemRaiseGeneralProtectionFault0(pVCpu);
1849 }
1850
1851 /* Fetch the descriptor. */
1852 IEMSELDESC Desc;
1853 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1854 if (rcStrict != VINF_SUCCESS)
1855 return rcStrict;
1856
1857 /* Is it there? */
1858 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1859 {
1860 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1861 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1862 }
1863
1864 /*
1865 * Deal with it according to its type. We do the standard code selectors
1866 * here and dispatch the system selectors to worker functions.
1867 */
1868 if (!Desc.Legacy.Gen.u1DescType)
1869 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1870
1871 /* Only code segments. */
1872 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1873 {
1874 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1875 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1876 }
1877
1878 /* L vs D. */
1879 if ( Desc.Legacy.Gen.u1Long
1880 && Desc.Legacy.Gen.u1DefBig
1881 && IEM_IS_LONG_MODE(pVCpu))
1882 {
1883 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1884 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1885 }
1886
1887     /* DPL/RPL/CPL check, where conforming segments make a difference. */
1888 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1889 {
1890 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1891 {
1892 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1893 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1894 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1895 }
1896 }
1897 else
1898 {
1899 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1900 {
1901 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1902 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1903 }
1904 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1905 {
1906             Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1907 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1908 }
1909 }
1910
1911 /* Chop the high bits if 16-bit (Intel says so). */
1912 if (enmEffOpSize == IEMMODE_16BIT)
1913 offSeg &= UINT16_MAX;
1914
1915 /* Limit check. (Should alternatively check for non-canonical addresses
1916 here, but that is ruled out by offSeg being 32-bit, right?) */
1917 uint64_t u64Base;
1918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1919 if (Desc.Legacy.Gen.u1Long)
1920 u64Base = 0;
1921 else
1922 {
1923 if (offSeg > cbLimit)
1924 {
1925 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1926 /** @todo: Intel says this is #GP(0)! */
1927 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1928 }
1929 u64Base = X86DESC_BASE(&Desc.Legacy);
1930 }
1931
1932 /*
1933 * Ok, everything checked out fine. Now set the accessed bit before
1934 * committing the result into CS, CSHID and RIP.
1935 */
1936 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1937 {
1938 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1939 if (rcStrict != VINF_SUCCESS)
1940 return rcStrict;
1941 /** @todo check what VT-x and AMD-V does. */
1942 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1943 }
1944
1945 /* commit */
1946 pCtx->rip = offSeg;
1947 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1948 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1949 pCtx->cs.ValidSel = pCtx->cs.Sel;
1950 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1951 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1952 pCtx->cs.u32Limit = cbLimit;
1953 pCtx->cs.u64Base = u64Base;
1954 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1955 pCtx->eflags.Bits.u1RF = 0;
1956 /** @todo check if the hidden bits are loaded correctly for 64-bit
1957 * mode. */
1958
1959 /* Flush the prefetch buffer. */
1960#ifdef IEM_WITH_CODE_TLB
1961 pVCpu->iem.s.pbInstrBuf = NULL;
1962#else
1963 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1964#endif
1965
1966 return VINF_SUCCESS;
1967}
1968
1969
1970/**
1971 * Implements far calls.
1972 *
1973  * This is very similar to iemCImpl_FarJmp.
1974 *
1975 * @param uSel The selector.
1976 * @param offSeg The segment offset.
1977 * @param enmEffOpSize The operand size (in case we need it).
1978 */
1979IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1980{
1981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1982 VBOXSTRICTRC rcStrict;
1983 uint64_t uNewRsp;
1984 RTPTRUNION uPtrRet;
1985
1986 /*
1987 * Real mode and V8086 mode are easy. The only snag seems to be that
1988 * CS.limit doesn't change and the limit check is done against the current
1989 * limit.
1990 */
1991 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
1992 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
1993 {
1994 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1995
1996 /* Check stack first - may #SS(0). */
1997 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1998 &uPtrRet.pv, &uNewRsp);
1999 if (rcStrict != VINF_SUCCESS)
2000 return rcStrict;
2001
2002 /* Check the target address range. */
2003 if (offSeg > UINT32_MAX)
2004 return iemRaiseGeneralProtectionFault0(pVCpu);
2005
2006 /* Everything is fine, push the return address. */
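        /* Frame layout, lowest address first: a 16-bit operand size stores IP and
           then CS in the 4-byte frame; a 32-bit operand size stores EIP at offset 0
           and CS at byte offset 4 (pu16[2]) of the 6-byte frame mapped above. */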
2007 if (enmEffOpSize == IEMMODE_16BIT)
2008 {
2009 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2010 uPtrRet.pu16[1] = pCtx->cs.Sel;
2011 }
2012 else
2013 {
2014 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2015             uPtrRet.pu16[2] = pCtx->cs.Sel;
2016 }
2017 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2018 if (rcStrict != VINF_SUCCESS)
2019 return rcStrict;
2020
2021 /* Branch. */
2022 pCtx->rip = offSeg;
2023 pCtx->cs.Sel = uSel;
2024 pCtx->cs.ValidSel = uSel;
2025 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2026 pCtx->cs.u64Base = (uint32_t)uSel << 4;
2027 pCtx->eflags.Bits.u1RF = 0;
2028 return VINF_SUCCESS;
2029 }
2030
2031 /*
2032 * Protected mode. Need to parse the specified descriptor...
2033 */
2034 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2035 {
2036 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2037 return iemRaiseGeneralProtectionFault0(pVCpu);
2038 }
2039
2040 /* Fetch the descriptor. */
2041 IEMSELDESC Desc;
2042 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2043 if (rcStrict != VINF_SUCCESS)
2044 return rcStrict;
2045
2046 /*
2047 * Deal with it according to its type. We do the standard code selectors
2048 * here and dispatch the system selectors to worker functions.
2049 */
2050 if (!Desc.Legacy.Gen.u1DescType)
2051 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2052
2053 /* Only code segments. */
2054 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2055 {
2056 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2057 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2058 }
2059
2060 /* L vs D. */
2061 if ( Desc.Legacy.Gen.u1Long
2062 && Desc.Legacy.Gen.u1DefBig
2063 && IEM_IS_LONG_MODE(pVCpu))
2064 {
2065 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2066 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2067 }
2068
2069     /* DPL/RPL/CPL check, where conforming segments make a difference. */
2070 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2071 {
2072 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2073 {
2074 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2075 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2076 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2077 }
2078 }
2079 else
2080 {
2081 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2082 {
2083 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2084 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2085 }
2086 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2087 {
2088             Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2089 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2090 }
2091 }
2092
2093 /* Is it there? */
2094 if (!Desc.Legacy.Gen.u1Present)
2095 {
2096 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2097 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2098 }
2099
2100 /* Check stack first - may #SS(0). */
2101 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2102 * 16-bit code cause a two or four byte CS to be pushed? */
2103 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2104 enmEffOpSize == IEMMODE_64BIT ? 8+8
2105 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2106 &uPtrRet.pv, &uNewRsp);
2107 if (rcStrict != VINF_SUCCESS)
2108 return rcStrict;
2109
2110 /* Chop the high bits if 16-bit (Intel says so). */
2111 if (enmEffOpSize == IEMMODE_16BIT)
2112 offSeg &= UINT16_MAX;
2113
2114 /* Limit / canonical check. */
2115 uint64_t u64Base;
2116 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2117 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2118 {
2119 if (!IEM_IS_CANONICAL(offSeg))
2120 {
2121 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2122 return iemRaiseNotCanonical(pVCpu);
2123 }
2124 u64Base = 0;
2125 }
2126 else
2127 {
2128 if (offSeg > cbLimit)
2129 {
2130 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2131 /** @todo: Intel says this is #GP(0)! */
2132 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2133 }
2134 u64Base = X86DESC_BASE(&Desc.Legacy);
2135 }
2136
2137 /*
2138 * Now set the accessed bit before
2139 * writing the return address to the stack and committing the result into
2140 * CS, CSHID and RIP.
2141 */
2142 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2143 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2144 {
2145 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 /** @todo check what VT-x and AMD-V does. */
2149 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2150 }
2151
2152 /* stack */
2153 if (enmEffOpSize == IEMMODE_16BIT)
2154 {
2155 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2156 uPtrRet.pu16[1] = pCtx->cs.Sel;
2157 }
2158 else if (enmEffOpSize == IEMMODE_32BIT)
2159 {
2160 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2161 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2162 }
2163 else
2164 {
2165 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2166 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2167 }
2168 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2169 if (rcStrict != VINF_SUCCESS)
2170 return rcStrict;
2171
2172 /* commit */
2173 pCtx->rip = offSeg;
2174 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2175 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
2176 pCtx->cs.ValidSel = pCtx->cs.Sel;
2177 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2178 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2179 pCtx->cs.u32Limit = cbLimit;
2180 pCtx->cs.u64Base = u64Base;
2181 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2182 pCtx->eflags.Bits.u1RF = 0;
2183 /** @todo check if the hidden bits are loaded correctly for 64-bit
2184 * mode. */
2185
2186 /* Flush the prefetch buffer. */
2187#ifdef IEM_WITH_CODE_TLB
2188 pVCpu->iem.s.pbInstrBuf = NULL;
2189#else
2190 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2191#endif
2192 return VINF_SUCCESS;
2193}
2194
2195
2196/**
2197 * Implements retf.
2198 *
2199 * @param enmEffOpSize The effective operand size.
2200  * @param cbPop The number of bytes of arguments to pop from the
2201  * stack.
2202 */
2203IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2204{
2205 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2206 VBOXSTRICTRC rcStrict;
2207 RTCPTRUNION uPtrFrame;
2208 uint64_t uNewRsp;
2209 uint64_t uNewRip;
2210 uint16_t uNewCs;
2211 NOREF(cbInstr);
2212
2213 /*
2214 * Read the stack values first.
2215 */
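    /* The return-frame layout read below, lowest address first:
     *    16-bit:  IP (2) + CS (2)                          =  4 bytes
     *    32-bit: EIP (4) + CS (2 at pu16[2]) + 2 unused    =  8 bytes
     *    64-bit: RIP (8) + CS (2 at pu16[4]) + 6 unused    = 16 bytes */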
2216 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2217 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2218 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2219 if (rcStrict != VINF_SUCCESS)
2220 return rcStrict;
2221 if (enmEffOpSize == IEMMODE_16BIT)
2222 {
2223 uNewRip = uPtrFrame.pu16[0];
2224 uNewCs = uPtrFrame.pu16[1];
2225 }
2226 else if (enmEffOpSize == IEMMODE_32BIT)
2227 {
2228 uNewRip = uPtrFrame.pu32[0];
2229 uNewCs = uPtrFrame.pu16[2];
2230 }
2231 else
2232 {
2233 uNewRip = uPtrFrame.pu64[0];
2234 uNewCs = uPtrFrame.pu16[4];
2235 }
2236 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2237 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2238 { /* extremely likely */ }
2239 else
2240 return rcStrict;
2241
2242 /*
2243 * Real mode and V8086 mode are easy.
2244 */
2245 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
2246 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
2247 {
2248 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2249 /** @todo check how this is supposed to work if sp=0xfffe. */
2250
2251 /* Check the limit of the new EIP. */
2252 /** @todo Intel pseudo code only does the limit check for 16-bit
2253  * operands; AMD does not make any distinction. What is right? */
2254 if (uNewRip > pCtx->cs.u32Limit)
2255 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2256
2257 /* commit the operation. */
2258 pCtx->rsp = uNewRsp;
2259 pCtx->rip = uNewRip;
2260 pCtx->cs.Sel = uNewCs;
2261 pCtx->cs.ValidSel = uNewCs;
2262 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2263 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2264 pCtx->eflags.Bits.u1RF = 0;
2265 /** @todo do we load attribs and limit as well? */
2266 if (cbPop)
2267 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2268 return VINF_SUCCESS;
2269 }
2270
2271 /*
2272 * Protected mode is complicated, of course.
2273 */
2274 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2275 {
2276 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2277 return iemRaiseGeneralProtectionFault0(pVCpu);
2278 }
2279
2280 /* Fetch the descriptor. */
2281 IEMSELDESC DescCs;
2282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2283 if (rcStrict != VINF_SUCCESS)
2284 return rcStrict;
2285
2286 /* Can only return to a code selector. */
2287 if ( !DescCs.Legacy.Gen.u1DescType
2288 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2289 {
2290 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2291 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2292 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2293 }
2294
2295 /* L vs D. */
2296 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2297 && DescCs.Legacy.Gen.u1DefBig
2298 && IEM_IS_LONG_MODE(pVCpu))
2299 {
2300 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2301 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2302 }
2303
2304 /* DPL/RPL/CPL checks. */
2305 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2306 {
2307 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2308 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2309 }
2310
2311 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2312 {
2313 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2314 {
2315 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2316 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2317 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2318 }
2319 }
2320 else
2321 {
2322 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2323 {
2324 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2325 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2326 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2327 }
2328 }
2329
2330 /* Is it there? */
2331 if (!DescCs.Legacy.Gen.u1Present)
2332 {
2333 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2334 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2335 }
2336
2337 /*
2338 * Return to outer privilege? (We'll typically have entered via a call gate.)
2339 */
2340 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2341 {
2342 /* Read the outer stack pointer stored *after* the parameters. */
2343 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop + cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2344 if (rcStrict != VINF_SUCCESS)
2345 return rcStrict;
2346
2347 uPtrFrame.pu8 += cbPop; /* Skip the parameters. */
2348
2349 uint16_t uNewOuterSs;
2350 uint64_t uNewOuterRsp;
2351 if (enmEffOpSize == IEMMODE_16BIT)
2352 {
2353 uNewOuterRsp = uPtrFrame.pu16[0];
2354 uNewOuterSs = uPtrFrame.pu16[1];
2355 }
2356 else if (enmEffOpSize == IEMMODE_32BIT)
2357 {
2358 uNewOuterRsp = uPtrFrame.pu32[0];
2359 uNewOuterSs = uPtrFrame.pu16[2];
2360 }
2361 else
2362 {
2363 uNewOuterRsp = uPtrFrame.pu64[0];
2364 uNewOuterSs = uPtrFrame.pu16[4];
2365 }
2366 uPtrFrame.pu8 -= cbPop; /* Put uPtrFrame back the way it was. */
2367 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2368 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2369 { /* extremely likely */ }
2370 else
2371 return rcStrict;
2372
2373 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2374 and read the selector. */
2375 IEMSELDESC DescSs;
2376 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2377 {
2378 if ( !DescCs.Legacy.Gen.u1Long
2379 || (uNewOuterSs & X86_SEL_RPL) == 3)
2380 {
2381 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2382 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2383 return iemRaiseGeneralProtectionFault0(pVCpu);
2384 }
2385 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2386 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2387 }
2388 else
2389 {
2390 /* Fetch the descriptor for the new stack segment. */
2391 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2392 if (rcStrict != VINF_SUCCESS)
2393 return rcStrict;
2394 }
2395
2396 /* Check that RPL of stack and code selectors match. */
2397 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2398 {
2399 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2400 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2401 }
2402
2403 /* Must be a writable data segment. */
2404 if ( !DescSs.Legacy.Gen.u1DescType
2405 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2406 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2407 {
2408 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2409 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2410 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2411 }
2412
2413 /* L vs D. (Not mentioned by intel.) */
2414 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2415 && DescSs.Legacy.Gen.u1DefBig
2416 && IEM_IS_LONG_MODE(pVCpu))
2417 {
2418 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2419 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2420 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2421 }
2422
2423 /* DPL/RPL/CPL checks. */
2424 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2425 {
2426 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2427 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2428 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2429 }
2430
2431 /* Is it there? */
2432 if (!DescSs.Legacy.Gen.u1Present)
2433 {
2434 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2435             return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewOuterSs);
2436 }
2437
2438 /* Calc SS limit.*/
2439 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2440
2441 /* Is RIP canonical or within CS.limit? */
2442 uint64_t u64Base;
2443 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2444
2445 /** @todo Testcase: Is this correct? */
2446 if ( DescCs.Legacy.Gen.u1Long
2447 && IEM_IS_LONG_MODE(pVCpu) )
2448 {
2449 if (!IEM_IS_CANONICAL(uNewRip))
2450 {
2451 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2452 return iemRaiseNotCanonical(pVCpu);
2453 }
2454 u64Base = 0;
2455 }
2456 else
2457 {
2458 if (uNewRip > cbLimitCs)
2459 {
2460 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2461 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2462 /** @todo: Intel says this is #GP(0)! */
2463 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2464 }
2465 u64Base = X86DESC_BASE(&DescCs.Legacy);
2466 }
2467
2468 /*
2469 * Now set the accessed bit before
2470 * writing the return address to the stack and committing the result into
2471 * CS, CSHID and RIP.
2472 */
2473 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2474 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2475 {
2476 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2477 if (rcStrict != VINF_SUCCESS)
2478 return rcStrict;
2479 /** @todo check what VT-x and AMD-V does. */
2480 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2481 }
2482 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2483 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2484 {
2485 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2486 if (rcStrict != VINF_SUCCESS)
2487 return rcStrict;
2488 /** @todo check what VT-x and AMD-V does. */
2489 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2490 }
2491
2492 /* commit */
2493 if (enmEffOpSize == IEMMODE_16BIT)
2494 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2495 else
2496 pCtx->rip = uNewRip;
2497 pCtx->cs.Sel = uNewCs;
2498 pCtx->cs.ValidSel = uNewCs;
2499 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2500 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2501 pCtx->cs.u32Limit = cbLimitCs;
2502 pCtx->cs.u64Base = u64Base;
2503 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2504 pCtx->ss.Sel = uNewOuterSs;
2505 pCtx->ss.ValidSel = uNewOuterSs;
2506 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2507 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2508 pCtx->ss.u32Limit = cbLimitSs;
2509 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2510 pCtx->ss.u64Base = 0;
2511 else
2512 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2513 if (!pCtx->ss.Attr.n.u1DefBig)
2514 pCtx->sp = (uint16_t)uNewOuterRsp;
2515 else
2516 pCtx->rsp = uNewOuterRsp;
2517
2518 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2519 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2520 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2521 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2522 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2523
2524 /** @todo check if the hidden bits are loaded correctly for 64-bit
2525 * mode. */
2526
2527 if (cbPop)
2528 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2529 pCtx->eflags.Bits.u1RF = 0;
2530
2531 /* Done! */
2532 }
2533 /*
2534 * Return to the same privilege level
2535 */
2536 else
2537 {
2538 /* Limit / canonical check. */
2539 uint64_t u64Base;
2540 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2541
2542 /** @todo Testcase: Is this correct? */
2543 if ( DescCs.Legacy.Gen.u1Long
2544 && IEM_IS_LONG_MODE(pVCpu) )
2545 {
2546 if (!IEM_IS_CANONICAL(uNewRip))
2547 {
2548 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2549 return iemRaiseNotCanonical(pVCpu);
2550 }
2551 u64Base = 0;
2552 }
2553 else
2554 {
2555 if (uNewRip > cbLimitCs)
2556 {
2557 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2558 /** @todo: Intel says this is #GP(0)! */
2559 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2560 }
2561 u64Base = X86DESC_BASE(&DescCs.Legacy);
2562 }
2563
2564 /*
2565 * Now set the accessed bit before
2566 * writing the return address to the stack and committing the result into
2567 * CS, CSHID and RIP.
2568 */
2569 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2570 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2571 {
2572 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2573 if (rcStrict != VINF_SUCCESS)
2574 return rcStrict;
2575 /** @todo check what VT-x and AMD-V does. */
2576 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2577 }
2578
2579 /* commit */
2580 if (!pCtx->ss.Attr.n.u1DefBig)
2581 pCtx->sp = (uint16_t)uNewRsp;
2582 else
2583 pCtx->rsp = uNewRsp;
2584 if (enmEffOpSize == IEMMODE_16BIT)
2585 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2586 else
2587 pCtx->rip = uNewRip;
2588 pCtx->cs.Sel = uNewCs;
2589 pCtx->cs.ValidSel = uNewCs;
2590 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2591 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2592 pCtx->cs.u32Limit = cbLimitCs;
2593 pCtx->cs.u64Base = u64Base;
2594 /** @todo check if the hidden bits are loaded correctly for 64-bit
2595 * mode. */
2596 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2597 if (cbPop)
2598 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2599 pCtx->eflags.Bits.u1RF = 0;
2600 }
2601
2602 /* Flush the prefetch buffer. */
2603#ifdef IEM_WITH_CODE_TLB
2604 pVCpu->iem.s.pbInstrBuf = NULL;
2605#else
2606 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2607#endif
2608 return VINF_SUCCESS;
2609}
2610
2611
2612/**
2613 * Implements retn.
2614 *
2615 * We're doing this in C because of the \#GP that might be raised if the popped
2616 * program counter is out of bounds.
2617 *
2618 * @param enmEffOpSize The effective operand size.
2619  * @param cbPop The number of bytes of arguments to pop from the
2620  * stack.
2621 */
2622IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2623{
2624 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2625 NOREF(cbInstr);
2626
2627 /* Fetch the RSP from the stack. */
2628 VBOXSTRICTRC rcStrict;
2629 RTUINT64U NewRip;
2630 RTUINT64U NewRsp;
2631 NewRsp.u = pCtx->rsp;
2632 switch (enmEffOpSize)
2633 {
2634 case IEMMODE_16BIT:
2635 NewRip.u = 0;
2636 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2637 break;
2638 case IEMMODE_32BIT:
2639 NewRip.u = 0;
2640 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2641 break;
2642 case IEMMODE_64BIT:
2643 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2644 break;
2645 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2646 }
2647 if (rcStrict != VINF_SUCCESS)
2648 return rcStrict;
2649
2650 /* Check the new RSP before loading it. */
2651 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2652 * of it. The canonical test is performed here and for call. */
2653 if (enmEffOpSize != IEMMODE_64BIT)
2654 {
2655 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2656 {
2657 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2658 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2659 }
2660 }
2661 else
2662 {
2663 if (!IEM_IS_CANONICAL(NewRip.u))
2664 {
2665 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2666 return iemRaiseNotCanonical(pVCpu);
2667 }
2668 }
2669
2670 /* Apply cbPop */
2671 if (cbPop)
2672 iemRegAddToRspEx(pVCpu, pCtx, &NewRsp, cbPop);
2673
2674 /* Commit it. */
2675 pCtx->rip = NewRip.u;
2676 pCtx->rsp = NewRsp.u;
2677 pCtx->eflags.Bits.u1RF = 0;
2678
2679 /* Flush the prefetch buffer. */
2680#ifndef IEM_WITH_CODE_TLB
2681 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2682#endif
2683
2684 return VINF_SUCCESS;
2685}
2686
2687
2688/**
2689 * Implements enter.
2690 *
2691  * We're doing this in C because the instruction is insane: even for the
2692  * cParameters=0 (zero nesting level) case, dealing with the stack is tedious.
2693  *
2694  * @param enmEffOpSize The effective operand size.
      * @param cbFrame The size of the stack frame to allocate (in bytes).
      * @param cParameters The number of parameters/nesting levels; only the
      * low five bits are used (0..31).
2695 */
2696IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2697{
2698 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2699
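    /* In outline, the common cParameters=0 case reduces to:
     *    push xBP; xBP := xSP; xSP -= cbFrame
     * e.g. 'enter 0x20, 0' reserves 0x20 bytes of locals. The nesting-level
     * copying further down only applies when cParameters > 0. */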
2700 /* Push RBP, saving the old value in TmpRbp. */
2701 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2702 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2703 RTUINT64U NewRbp;
2704 VBOXSTRICTRC rcStrict;
2705 if (enmEffOpSize == IEMMODE_64BIT)
2706 {
2707 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2708 NewRbp = NewRsp;
2709 }
2710 else if (enmEffOpSize == IEMMODE_32BIT)
2711 {
2712 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2713 NewRbp = NewRsp;
2714 }
2715 else
2716 {
2717 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2718 NewRbp = TmpRbp;
2719 NewRbp.Words.w0 = NewRsp.Words.w0;
2720 }
2721 if (rcStrict != VINF_SUCCESS)
2722 return rcStrict;
2723
2724 /* Copy the parameters (aka nesting levels by Intel). */
2725 cParameters &= 0x1f;
2726 if (cParameters > 0)
2727 {
2728 switch (enmEffOpSize)
2729 {
2730 case IEMMODE_16BIT:
2731 if (pCtx->ss.Attr.n.u1DefBig)
2732 TmpRbp.DWords.dw0 -= 2;
2733 else
2734 TmpRbp.Words.w0 -= 2;
2735 do
2736 {
2737 uint16_t u16Tmp;
2738 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2739 if (rcStrict != VINF_SUCCESS)
2740 break;
2741 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2742 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2743 break;
2744
2745 case IEMMODE_32BIT:
2746 if (pCtx->ss.Attr.n.u1DefBig)
2747 TmpRbp.DWords.dw0 -= 4;
2748 else
2749 TmpRbp.Words.w0 -= 4;
2750 do
2751 {
2752 uint32_t u32Tmp;
2753 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2754 if (rcStrict != VINF_SUCCESS)
2755 break;
2756 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2757 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2758 break;
2759
2760 case IEMMODE_64BIT:
2761 TmpRbp.u -= 8;
2762 do
2763 {
2764 uint64_t u64Tmp;
2765 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2766 if (rcStrict != VINF_SUCCESS)
2767 break;
2768 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2769 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2770 break;
2771
2772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2773 }
2774 if (rcStrict != VINF_SUCCESS)
2775             return rcStrict;
2776
2777 /* Push the new RBP */
2778 if (enmEffOpSize == IEMMODE_64BIT)
2779 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2780 else if (enmEffOpSize == IEMMODE_32BIT)
2781 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2782 else
2783 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2784 if (rcStrict != VINF_SUCCESS)
2785 return rcStrict;
2786
2787 }
2788
2789 /* Recalc RSP. */
2790 iemRegSubFromRspEx(pVCpu, pCtx, &NewRsp, cbFrame);
2791
2792 /** @todo Should probe write access at the new RSP according to AMD. */
2793
2794 /* Commit it. */
2795 pCtx->rbp = NewRbp.u;
2796 pCtx->rsp = NewRsp.u;
2797 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2798
2799 return VINF_SUCCESS;
2800}
2801
2802
2803
2804/**
2805 * Implements leave.
2806 *
2807 * We're doing this in C because messing with the stack registers is annoying
2808  * since they depend on SS attributes.
2809 *
2810 * @param enmEffOpSize The effective operand size.
2811 */
2812IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2813{
2814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2815
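    /* In outline: xSP := xBP (width picked by long mode / SS.D below), then
     * pop xBP using the effective operand size. */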
2816 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2817 RTUINT64U NewRsp;
2818 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2819 NewRsp.u = pCtx->rbp;
2820 else if (pCtx->ss.Attr.n.u1DefBig)
2821 NewRsp.u = pCtx->ebp;
2822 else
2823 {
2824         /** @todo Check that LEAVE actually preserves the high EBP bits. */
2825 NewRsp.u = pCtx->rsp;
2826 NewRsp.Words.w0 = pCtx->bp;
2827 }
2828
2829 /* Pop RBP according to the operand size. */
2830 VBOXSTRICTRC rcStrict;
2831 RTUINT64U NewRbp;
2832 switch (enmEffOpSize)
2833 {
2834 case IEMMODE_16BIT:
2835 NewRbp.u = pCtx->rbp;
2836 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2837 break;
2838 case IEMMODE_32BIT:
2839 NewRbp.u = 0;
2840 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2841 break;
2842 case IEMMODE_64BIT:
2843 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2844 break;
2845 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2846 }
2847 if (rcStrict != VINF_SUCCESS)
2848 return rcStrict;
2849
2850
2851 /* Commit it. */
2852 pCtx->rbp = NewRbp.u;
2853 pCtx->rsp = NewRsp.u;
2854 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2855
2856 return VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Implements int3 and int XX.
2862 *
2863 * @param u8Int The interrupt vector number.
2864 * @param enmInt The int instruction type.
2865 */
2866IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2867{
2868 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2869 return iemRaiseXcptOrInt(pVCpu,
2870 cbInstr,
2871 u8Int,
2872 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2873 0,
2874 0);
2875}
2876
2877
2878/**
2879 * Implements iret for real mode and V8086 mode.
2880 *
2881 * @param enmEffOpSize The effective operand size.
2882 */
2883IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2884{
2885 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2886 X86EFLAGS Efl;
2887 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
2888 NOREF(cbInstr);
2889
2890 /*
2891      * In V8086 mode with IOPL below 3, iret raises #GP(0) unless CR4.VME is set.
2892 */
2893 if ( Efl.Bits.u1VM
2894 && Efl.Bits.u2IOPL != 3
2895 && !(pCtx->cr4 & X86_CR4_VME))
2896 return iemRaiseGeneralProtectionFault0(pVCpu);
2897
2898 /*
2899 * Do the stack bits, but don't commit RSP before everything checks
2900 * out right.
2901 */
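    /* The IRET frame popped below is IP+CS+FLAGS (2+2+2 = 6 bytes) for a 16-bit
     * operand size and EIP+CS+EFLAGS (4+4+4 = 12 bytes) for a 32-bit one. */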
2902 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2903 VBOXSTRICTRC rcStrict;
2904 RTCPTRUNION uFrame;
2905 uint16_t uNewCs;
2906 uint32_t uNewEip;
2907 uint32_t uNewFlags;
2908 uint64_t uNewRsp;
2909 if (enmEffOpSize == IEMMODE_32BIT)
2910 {
2911 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914 uNewEip = uFrame.pu32[0];
2915 if (uNewEip > UINT16_MAX)
2916 return iemRaiseGeneralProtectionFault0(pVCpu);
2917
2918 uNewCs = (uint16_t)uFrame.pu32[1];
2919 uNewFlags = uFrame.pu32[2];
2920 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2921 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2922 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2923 | X86_EFL_ID;
2924 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2925 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2926 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2927 }
2928 else
2929 {
2930 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
2931 if (rcStrict != VINF_SUCCESS)
2932 return rcStrict;
2933 uNewEip = uFrame.pu16[0];
2934 uNewCs = uFrame.pu16[1];
2935 uNewFlags = uFrame.pu16[2];
2936 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2937 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2938 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2939 /** @todo The intel pseudo code does not indicate what happens to
2940 * reserved flags. We just ignore them. */
2941 /* Ancient CPU adjustments: See iemCImpl_popf. */
2942 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2943 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2944 }
2945 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2946 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2947 { /* extremely likely */ }
2948 else
2949 return rcStrict;
2950
2951 /** @todo Check how this is supposed to work if sp=0xfffe. */
2952     Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2953 uNewCs, uNewEip, uNewFlags, uNewRsp));
2954
2955 /*
2956 * Check the limit of the new EIP.
2957 */
2958     /** @todo Only the AMD pseudo code checks the limit here, what's
2959 * right? */
2960 if (uNewEip > pCtx->cs.u32Limit)
2961 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2962
2963 /*
2964 * V8086 checks and flag adjustments
2965 */
2966 if (Efl.Bits.u1VM)
2967 {
2968 if (Efl.Bits.u2IOPL == 3)
2969 {
2970 /* Preserve IOPL and clear RF. */
2971 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2972 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2973 }
2974 else if ( enmEffOpSize == IEMMODE_16BIT
2975 && ( !(uNewFlags & X86_EFL_IF)
2976 || !Efl.Bits.u1VIP )
2977 && !(uNewFlags & X86_EFL_TF) )
2978 {
2979 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2980 uNewFlags &= ~X86_EFL_VIF;
2981 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2982 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2983 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2984 }
2985 else
2986 return iemRaiseGeneralProtectionFault0(pVCpu);
2987 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2988 }
2989
2990 /*
2991 * Commit the operation.
2992 */
2993#ifdef DBGFTRACE_ENABLED
2994 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2995 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2996#endif
2997 pCtx->rsp = uNewRsp;
2998 pCtx->rip = uNewEip;
2999 pCtx->cs.Sel = uNewCs;
3000 pCtx->cs.ValidSel = uNewCs;
3001 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3002 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
3003 /** @todo do we load attribs and limit as well? */
3004 Assert(uNewFlags & X86_EFL_1);
3005 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3006
3007 /* Flush the prefetch buffer. */
3008#ifdef IEM_WITH_CODE_TLB
3009 pVCpu->iem.s.pbInstrBuf = NULL;
3010#else
3011 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3012#endif
3013
3014 return VINF_SUCCESS;
3015}
3016
3017
3018/**
3019 * Loads a segment register when entering V8086 mode.
3020 *
3021 * @param pSReg The segment register.
3022 * @param uSeg The segment to load.
3023 */
3024static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3025{
3026 pSReg->Sel = uSeg;
3027 pSReg->ValidSel = uSeg;
3028 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3029 pSReg->u64Base = (uint32_t)uSeg << 4;
3030 pSReg->u32Limit = 0xffff;
3031 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3032 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3033 * IRET'ing to V8086. */
3034}
3035
3036
3037/**
3038 * Implements iret for protected mode returning to V8086 mode.
3039 *
3040 * @param pCtx Pointer to the CPU context.
3041 * @param uNewEip The new EIP.
3042 * @param uNewCs The new CS.
3043 * @param uNewFlags The new EFLAGS.
3044 * @param uNewRsp The RSP after the initial IRET frame.
3045 *
3046  * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3047 */
3048IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
3049 uint32_t, uNewFlags, uint64_t, uNewRsp)
3050{
3051 RT_NOREF_PV(cbInstr);
3052
3053 /*
3054 * Pop the V8086 specific frame bits off the stack.
3055 */
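    /* Returning to V8086 extends the 32-bit IRET frame by six more dwords,
     * read below in this order: ESP, SS, ES, DS, FS, GS (6 * 4 = 24 bytes). */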
3056 VBOXSTRICTRC rcStrict;
3057 RTCPTRUNION uFrame;
3058 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 24, &uFrame.pv, &uNewRsp);
3059 if (rcStrict != VINF_SUCCESS)
3060 return rcStrict;
3061 uint32_t uNewEsp = uFrame.pu32[0];
3062 uint16_t uNewSs = uFrame.pu32[1];
3063 uint16_t uNewEs = uFrame.pu32[2];
3064 uint16_t uNewDs = uFrame.pu32[3];
3065 uint16_t uNewFs = uFrame.pu32[4];
3066 uint16_t uNewGs = uFrame.pu32[5];
3067 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3068 if (rcStrict != VINF_SUCCESS)
3069 return rcStrict;
3070
3071 /*
3072 * Commit the operation.
3073 */
3074 uNewFlags &= X86_EFL_LIVE_MASK;
3075 uNewFlags |= X86_EFL_RA1_MASK;
3076#ifdef DBGFTRACE_ENABLED
3077 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3078 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3079#endif
3080 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3081
3082 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3083 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
3084 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
3085 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
3086 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
3087 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
3088 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
3089 pCtx->rip = (uint16_t)uNewEip;
3090 pCtx->rsp = uNewEsp; /** @todo check this out! */
3091 pVCpu->iem.s.uCpl = 3;
3092
3093 /* Flush the prefetch buffer. */
3094#ifdef IEM_WITH_CODE_TLB
3095 pVCpu->iem.s.pbInstrBuf = NULL;
3096#else
3097 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3098#endif
3099
3100 return VINF_SUCCESS;
3101}
3102
3103
3104/**
3105 * Implements iret for protected mode returning via a nested task.
3106 *
3107 * @param enmEffOpSize The effective operand size.
3108 */
3109IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3110{
3111 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3112#ifndef IEM_IMPLEMENTS_TASKSWITCH
3113 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3114#else
3115 RT_NOREF_PV(enmEffOpSize);
3116
3117 /*
3118 * Read the segment selector in the link-field of the current TSS.
3119 */
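    /* The previous-task link is the very first word of the TSS (offset 0),
     * which is why the 16-bit system read below uses tr.u64Base directly. */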
3120 RTSEL uSelRet;
3121 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3122 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
3123 if (rcStrict != VINF_SUCCESS)
3124 return rcStrict;
3125
3126 /*
3127 * Fetch the returning task's TSS descriptor from the GDT.
3128 */
3129 if (uSelRet & X86_SEL_LDT)
3130 {
3131 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3132 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3133 }
3134
3135 IEMSELDESC TssDesc;
3136 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3137 if (rcStrict != VINF_SUCCESS)
3138 return rcStrict;
3139
3140 if (TssDesc.Legacy.Gate.u1DescType)
3141 {
3142 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3143 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3144 }
3145
3146 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3147 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3148 {
3149 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3150 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3151 }
3152
3153 if (!TssDesc.Legacy.Gate.u1Present)
3154 {
3155 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3156 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3157 }
3158
3159 uint32_t uNextEip = pCtx->eip + cbInstr;
3160 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3161 0 /* uCr2 */, uSelRet, &TssDesc);
3162#endif
3163}
3164
3165
3166/**
3167  * Implements iret for protected mode.
3168 *
3169 * @param enmEffOpSize The effective operand size.
3170 */
3171IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3172{
3173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3174 NOREF(cbInstr);
3175 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3176
3177 /*
3178 * Nested task return.
3179 */
3180 if (pCtx->eflags.Bits.u1NT)
3181 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3182
3183 /*
3184 * Normal return.
3185 *
3186 * Do the stack bits, but don't commit RSP before everything checks
3187 * out right.
3188 */
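    /* As in the real-mode variant, the base frame popped here is IP+CS+FLAGS
     * (6 bytes) or EIP+CS+EFLAGS (12 bytes); SS:ESP only follow on the stack
     * when returning to an outer privilege level (handled further down). */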
3189 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3190 VBOXSTRICTRC rcStrict;
3191 RTCPTRUNION uFrame;
3192 uint16_t uNewCs;
3193 uint32_t uNewEip;
3194 uint32_t uNewFlags;
3195 uint64_t uNewRsp;
3196 if (enmEffOpSize == IEMMODE_32BIT)
3197 {
3198 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
3199 if (rcStrict != VINF_SUCCESS)
3200 return rcStrict;
3201 uNewEip = uFrame.pu32[0];
3202 uNewCs = (uint16_t)uFrame.pu32[1];
3203 uNewFlags = uFrame.pu32[2];
3204 }
3205 else
3206 {
3207 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
3208 if (rcStrict != VINF_SUCCESS)
3209 return rcStrict;
3210 uNewEip = uFrame.pu16[0];
3211 uNewCs = uFrame.pu16[1];
3212 uNewFlags = uFrame.pu16[2];
3213 }
3214 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3215 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3216 { /* extremely likely */ }
3217 else
3218 return rcStrict;
3219 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3220
3221 /*
3222 * We're hopefully not returning to V8086 mode...
3223 */
3224 if ( (uNewFlags & X86_EFL_VM)
3225 && pVCpu->iem.s.uCpl == 0)
3226 {
3227 Assert(enmEffOpSize == IEMMODE_32BIT);
3228 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3229 }
3230
3231 /*
3232 * Protected mode.
3233 */
3234 /* Read the CS descriptor. */
3235 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3236 {
3237 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3238 return iemRaiseGeneralProtectionFault0(pVCpu);
3239 }
3240
3241 IEMSELDESC DescCS;
3242 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3243 if (rcStrict != VINF_SUCCESS)
3244 {
3245 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3246 return rcStrict;
3247 }
3248
3249 /* Must be a code descriptor. */
3250 if (!DescCS.Legacy.Gen.u1DescType)
3251 {
3252 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3253 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3254 }
3255 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3256 {
3257 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3258 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3259 }
3260
3261#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3262 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3263 PVM pVM = pVCpu->CTX_SUFF(pVM);
3264 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM))
3265 {
3266 if ((uNewCs & X86_SEL_RPL) == 1)
3267 {
3268 if ( pVCpu->iem.s.uCpl == 0
3269 && ( !EMIsRawRing1Enabled(pVM)
3270 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3271 {
3272 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3273 uNewCs &= X86_SEL_MASK_OFF_RPL;
3274 }
3275# ifdef LOG_ENABLED
3276 else if (pVCpu->iem.s.uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3277 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3278# endif
3279 }
3280 else if ( (uNewCs & X86_SEL_RPL) == 2
3281 && EMIsRawRing1Enabled(pVM)
3282 && pVCpu->iem.s.uCpl <= 1)
3283 {
3284 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3285 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3286 }
3287 }
3288#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3289
3290
3291 /* Privilege checks. */
3292 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3293 {
3294 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3295 {
3296 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3297 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3298 }
3299 }
3300 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3301 {
3302 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3303 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3304 }
3305 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3306 {
3307 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3308 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3309 }
3310
3311 /* Present? */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3316 }
3317
3318 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3319
3320 /*
3321 * Return to outer level?
3322 */
3323 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3324 {
3325 uint16_t uNewSS;
3326 uint32_t uNewESP;
3327 if (enmEffOpSize == IEMMODE_32BIT)
3328 {
3329 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &uFrame.pv, &uNewRsp);
3330 if (rcStrict != VINF_SUCCESS)
3331 return rcStrict;
3332/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3333 * 16-bit or 32-bit is loaded into SP depends on the D/B
3334 * bit of the popped SS selector, it turns out. */
3335 uNewESP = uFrame.pu32[0];
3336 uNewSS = (uint16_t)uFrame.pu32[1];
3337 }
3338 else
3339 {
3340 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 4, &uFrame.pv, &uNewRsp);
3341 if (rcStrict != VINF_SUCCESS)
3342 return rcStrict;
3343 uNewESP = uFrame.pu16[0];
3344 uNewSS = uFrame.pu16[1];
3345 }
3346 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3347 if (rcStrict != VINF_SUCCESS)
3348 return rcStrict;
3349 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3350
3351 /* Read the SS descriptor. */
3352 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3353 {
3354 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3355 return iemRaiseGeneralProtectionFault0(pVCpu);
3356 }
3357
3358 IEMSELDESC DescSS;
3359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3360 if (rcStrict != VINF_SUCCESS)
3361 {
3362 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3363 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3364 return rcStrict;
3365 }
3366
3367 /* Privilege checks. */
3368 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3369 {
3370 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3371 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3372 }
3373 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3374 {
3375 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3376 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3377 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3378 }
3379
3380 /* Must be a writeable data segment descriptor. */
3381 if (!DescSS.Legacy.Gen.u1DescType)
3382 {
3383 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3384 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3385 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3386 }
3387 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3388 {
3389 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3390 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3391 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3392 }
3393
3394 /* Present? */
3395 if (!DescSS.Legacy.Gen.u1Present)
3396 {
3397 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3398 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3399 }
3400
3401 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3402
3403 /* Check EIP. */
3404 if (uNewEip > cbLimitCS)
3405 {
3406 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3407 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3408 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3409 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3410 }
3411
3412 /*
3413 * Commit the changes, marking CS and SS accessed first since
3414 * that may fail.
3415 */
3416 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3417 {
3418 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3419 if (rcStrict != VINF_SUCCESS)
3420 return rcStrict;
3421 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3422 }
3423 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3424 {
3425 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3426 if (rcStrict != VINF_SUCCESS)
3427 return rcStrict;
3428 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3429 }
3430
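 /* Build the set of EFLAGS bits IRET may restore at this privilege level:
    IF only when CPL <= IOPL (or CPL 0), IOPL/VIF/VIP only at CPL 0, and
    AC/ID/VIF/VIP are dropped when emulating a 386 or older CPU. */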
3431 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3432 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3433 if (enmEffOpSize != IEMMODE_16BIT)
3434 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3435 if (pVCpu->iem.s.uCpl == 0)
3436 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3437 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3438 fEFlagsMask |= X86_EFL_IF;
3439 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3440 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3441 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3442 fEFlagsNew &= ~fEFlagsMask;
3443 fEFlagsNew |= uNewFlags & fEFlagsMask;
3444#ifdef DBGFTRACE_ENABLED
3445 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3446 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3447 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3448#endif
3449
3450 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3451 pCtx->rip = uNewEip;
3452 pCtx->cs.Sel = uNewCs;
3453 pCtx->cs.ValidSel = uNewCs;
3454 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3455 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3456 pCtx->cs.u32Limit = cbLimitCS;
3457 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3458 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3459
3460 pCtx->ss.Sel = uNewSS;
3461 pCtx->ss.ValidSel = uNewSS;
3462 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3463 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3464 pCtx->ss.u32Limit = cbLimitSs;
3465 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3466 if (!pCtx->ss.Attr.n.u1DefBig)
3467 pCtx->sp = (uint16_t)uNewESP;
3468 else
3469 pCtx->rsp = uNewESP;
3470
3471 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3472 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3473 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3474 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3475 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3476
3477 /* Done! */
3478
3479 }
3480 /*
3481 * Return to the same level.
3482 */
3483 else
3484 {
3485 /* Check EIP. */
3486 if (uNewEip > cbLimitCS)
3487 {
3488 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3489 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3490 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3491 }
3492
3493 /*
3494 * Commit the changes, marking CS first since it may fail.
3495 */
3496 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3497 {
3498 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3499 if (rcStrict != VINF_SUCCESS)
3500 return rcStrict;
3501 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3502 }
3503
3504 X86EFLAGS NewEfl;
3505 NewEfl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
3506 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3507 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3508 if (enmEffOpSize != IEMMODE_16BIT)
3509 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3510 if (pVCpu->iem.s.uCpl == 0)
3511 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3512 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3513 fEFlagsMask |= X86_EFL_IF;
3514 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3515 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3516 NewEfl.u &= ~fEFlagsMask;
3517 NewEfl.u |= fEFlagsMask & uNewFlags;
3518#ifdef DBGFTRACE_ENABLED
3519 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3520 pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip,
3521 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3522#endif
3523
3524 IEMMISC_SET_EFL(pVCpu, pCtx, NewEfl.u);
3525 pCtx->rip = uNewEip;
3526 pCtx->cs.Sel = uNewCs;
3527 pCtx->cs.ValidSel = uNewCs;
3528 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3529 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3530 pCtx->cs.u32Limit = cbLimitCS;
3531 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3532 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3533 if (!pCtx->ss.Attr.n.u1DefBig)
3534 pCtx->sp = (uint16_t)uNewRsp;
3535 else
3536 pCtx->rsp = uNewRsp;
3537 /* Done! */
3538 }
3539
3540 /* Flush the prefetch buffer. */
3541#ifdef IEM_WITH_CODE_TLB
3542 pVCpu->iem.s.pbInstrBuf = NULL;
3543#else
3544 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3545#endif
3546
3547 return VINF_SUCCESS;
3548}
3549
3550
3551/**
3552 * Implements iret for long mode.
3553 *
3554 * @param enmEffOpSize The effective operand size.
3555 */
3556IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3557{
3558 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3559 NOREF(cbInstr);
3560
3561 /*
3562 * Nested task return is not supported in long mode.
3563 */
3564 if (pCtx->eflags.Bits.u1NT)
3565 {
3566 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3567 return iemRaiseGeneralProtectionFault0(pVCpu);
3568 }
3569
3570 /*
3571 * Normal return.
3572 *
3573 * Do the stack bits, but don't commit RSP before everything checks
3574 * out right.
3575 */
3576 VBOXSTRICTRC rcStrict;
3577 RTCPTRUNION uFrame;
3578 uint64_t uNewRip;
3579 uint16_t uNewCs;
3580 uint16_t uNewSs;
3581 uint32_t uNewFlags;
3582 uint64_t uNewRsp;
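 /* The long mode IRET frame always holds five entries: RIP, CS, RFLAGS,
    RSP and SS, each of the effective operand size. */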
3583 if (enmEffOpSize == IEMMODE_64BIT)
3584 {
3585 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);
3586 if (rcStrict != VINF_SUCCESS)
3587 return rcStrict;
3588 uNewRip = uFrame.pu64[0];
3589 uNewCs = (uint16_t)uFrame.pu64[1];
3590 uNewFlags = (uint32_t)uFrame.pu64[2];
3591 uNewRsp = uFrame.pu64[3];
3592 uNewSs = (uint16_t)uFrame.pu64[4];
3593 }
3594 else if (enmEffOpSize == IEMMODE_32BIT)
3595 {
3596 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);
3597 if (rcStrict != VINF_SUCCESS)
3598 return rcStrict;
3599 uNewRip = uFrame.pu32[0];
3600 uNewCs = (uint16_t)uFrame.pu32[1];
3601 uNewFlags = uFrame.pu32[2];
3602 uNewRsp = uFrame.pu32[3];
3603 uNewSs = (uint16_t)uFrame.pu32[4];
3604 }
3605 else
3606 {
3607 Assert(enmEffOpSize == IEMMODE_16BIT);
3608 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);
3609 if (rcStrict != VINF_SUCCESS)
3610 return rcStrict;
3611 uNewRip = uFrame.pu16[0];
3612 uNewCs = uFrame.pu16[1];
3613 uNewFlags = uFrame.pu16[2];
3614 uNewRsp = uFrame.pu16[3];
3615 uNewSs = uFrame.pu16[4];
3616 }
3617 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3618 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3619 { /* extremely likely */ }
3620 else
3621 return rcStrict;
3622 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3623
3624 /*
3625 * Check stuff.
3626 */
3627 /* Read the CS descriptor. */
3628 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3629 {
3630 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3631 return iemRaiseGeneralProtectionFault0(pVCpu);
3632 }
3633
3634 IEMSELDESC DescCS;
3635 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3636 if (rcStrict != VINF_SUCCESS)
3637 {
3638 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3639 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3640 return rcStrict;
3641 }
3642
3643 /* Must be a code descriptor. */
3644 if ( !DescCS.Legacy.Gen.u1DescType
3645 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3646 {
3647 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment DT=%u T=%#x -> #GP\n",
3648 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3649 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3650 }
3651
3652 /* Privilege checks. */
3653 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3654 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3655 {
3656 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3657 {
3658 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3659 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3660 }
3661 }
3662 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3663 {
3664 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3665 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3666 }
3667 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3668 {
3669 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3670 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3671 }
3672
3673 /* Present? */
3674 if (!DescCS.Legacy.Gen.u1Present)
3675 {
3676 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3677 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3678 }
3679
3680 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3681
3682 /* Read the SS descriptor. */
3683 IEMSELDESC DescSS;
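 /* A NULL SS is tolerated here when returning to 64-bit code at CPL 0..2,
    since 64-bit mode does not use the SS base/limit; compatibility mode and
    (we assume) ring-3 returns still require a real stack selector. */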
3684 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3685 {
3686 if ( !DescCS.Legacy.Gen.u1Long
3687 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3688 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3689 {
3690 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3691 return iemRaiseGeneralProtectionFault0(pVCpu);
3692 }
3693 DescSS.Legacy.u = 0;
3694 }
3695 else
3696 {
3697 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3698 if (rcStrict != VINF_SUCCESS)
3699 {
3700 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3701 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3702 return rcStrict;
3703 }
3704 }
3705
3706 /* Privilege checks. */
3707 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3708 {
3709 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3710 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3711 }
3712
3713 uint32_t cbLimitSs;
3714 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3715 cbLimitSs = UINT32_MAX;
3716 else
3717 {
3718 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3719 {
3720 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3721 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3722 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3723 }
3724
3725 /* Must be a writeable data segment descriptor. */
3726 if (!DescSS.Legacy.Gen.u1DescType)
3727 {
3728 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3729 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3730 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3731 }
3732 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3733 {
3734 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3735 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3736 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3737 }
3738
3739 /* Present? */
3740 if (!DescSS.Legacy.Gen.u1Present)
3741 {
3742 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3743 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3744 }
3745 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3746 }
3747
3748 /* Check EIP. */
3749 if (DescCS.Legacy.Gen.u1Long)
3750 {
3751 if (!IEM_IS_CANONICAL(uNewRip))
3752 {
3753 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3754 uNewCs, uNewRip, uNewSs, uNewRsp));
3755 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3756 }
3757 }
3758 else
3759 {
3760 if (uNewRip > cbLimitCS)
3761 {
3762 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3763 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3764 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3765 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3766 }
3767 }
3768
3769 /*
3770 * Commit the changes, marking CS and SS accessed first since
3771 * that may fail.
3772 */
3773 /** @todo where exactly are these actually marked accessed by a real CPU? */
3774 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3775 {
3776 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3777 if (rcStrict != VINF_SUCCESS)
3778 return rcStrict;
3779 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3780 }
3781 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3782 {
3783 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3784 if (rcStrict != VINF_SUCCESS)
3785 return rcStrict;
3786 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3787 }
3788
3789 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3790 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3791 if (enmEffOpSize != IEMMODE_16BIT)
3792 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3793 if (pVCpu->iem.s.uCpl == 0)
3794 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3795 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3796 fEFlagsMask |= X86_EFL_IF;
3797 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3798 fEFlagsNew &= ~fEFlagsMask;
3799 fEFlagsNew |= uNewFlags & fEFlagsMask;
3800#ifdef DBGFTRACE_ENABLED
3801 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3802 pVCpu->iem.s.uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3803#endif
3804
3805 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3806 pCtx->rip = uNewRip;
3807 pCtx->cs.Sel = uNewCs;
3808 pCtx->cs.ValidSel = uNewCs;
3809 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3810 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3811 pCtx->cs.u32Limit = cbLimitCS;
3812 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3813 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3814 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3815 pCtx->rsp = uNewRsp;
3816 else
3817 pCtx->sp = (uint16_t)uNewRsp;
3818 pCtx->ss.Sel = uNewSs;
3819 pCtx->ss.ValidSel = uNewSs;
3820 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3821 {
3822 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3823 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3824 pCtx->ss.u32Limit = UINT32_MAX;
3825 pCtx->ss.u64Base = 0;
3826 Log2(("iretq new SS: NULL\n"));
3827 }
3828 else
3829 {
3830 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3831 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3832 pCtx->ss.u32Limit = cbLimitSs;
3833 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3834 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3835 }
3836
3837 if (pVCpu->iem.s.uCpl != uNewCpl)
3838 {
3839 pVCpu->iem.s.uCpl = uNewCpl;
3840 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->ds);
3841 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->es);
3842 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->fs);
3843 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->gs);
3844 }
3845
3846 /* Flush the prefetch buffer. */
3847#ifdef IEM_WITH_CODE_TLB
3848 pVCpu->iem.s.pbInstrBuf = NULL;
3849#else
3850 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3851#endif
3852
3853 return VINF_SUCCESS;
3854}
3855
3856
3857/**
3858 * Implements iret.
3859 *
3860 * @param enmEffOpSize The effective operand size.
3861 */
3862IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3863{
3864 /*
3865 * First, clear NMI blocking, if any, before causing any exceptions.
3866 */
3867 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3868
3869 /*
3870 * The SVM nested-guest intercept for iret takes priority over all exceptions,
3871 * see AMD spec. "15.9 Instruction Intercepts".
3872 */
3873 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3874 {
3875 Log(("iret: Guest intercept -> #VMEXIT\n"));
3876 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3877 }
3878
3879 /*
3880 * Call a mode specific worker.
3881 */
3882 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3883 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3884 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3885 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3886 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3887}
3888
3889
3890/**
3891 * Implements SYSCALL (AMD and Intel64).
3894 */
3895IEM_CIMPL_DEF_0(iemCImpl_syscall)
3896{
3897 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3898
3899 /*
3900 * Check preconditions.
3901 *
3902 * Note that the documentation suggests real CPUs may load a few odd values
3903 * into CS and SS beyond what we allow for here. This has yet to be checked
3904 * on real hardware.
3905 */
3906 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3907 {
3908 Log(("syscall: Not enabled in EFER -> #UD\n"));
3909 return iemRaiseUndefinedOpcode(pVCpu);
3910 }
3911 if (!(pCtx->cr0 & X86_CR0_PE))
3912 {
3913 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3914 return iemRaiseGeneralProtectionFault0(pVCpu);
3915 }
3916 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3917 {
3918 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3919 return iemRaiseUndefinedOpcode(pVCpu);
3920 }
3921
3922 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3923 /** @todo what about LDT selectors? Shouldn't matter, really. */
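 /* SYSCALL takes CS from STAR[47:32] (RPL forced to 0) and assumes the SS
    descriptor is the very next GDT entry, i.e. the CS selector + 8. */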
3924 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3925 uint16_t uNewSs = uNewCs + 8;
3926 if (uNewCs == 0 || uNewSs == 0)
3927 {
3928 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3929 return iemRaiseGeneralProtectionFault0(pVCpu);
3930 }
3931
3932 /* Long mode and legacy mode differs. */
3933 if (CPUMIsGuestInLongModeEx(pCtx))
3934 {
3935 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3936
3937 /* This test isn't in the docs, but I'm not trusting the guys writing
3938 the MSRs to have validated the values as canonical like they should. */
3939 if (!IEM_IS_CANONICAL(uNewRip))
3940 {
3941 Log(("syscall: New RIP %#RX64 is not canonical -> #UD\n", uNewRip));
3942 return iemRaiseUndefinedOpcode(pVCpu);
3943 }
3944
3945 /*
3946 * Commit it.
3947 */
3948 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
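 /* RCX receives the return RIP and R11 the current RFLAGS (with RF cleared);
    RFLAGS is then masked by SFMASK (every bit set in the MSR is cleared) and
    the reserved bit 1 is forced to 1. */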
3949 pCtx->rcx = pCtx->rip + cbInstr;
3950 pCtx->rip = uNewRip;
3951
3952 pCtx->rflags.u &= ~X86_EFL_RF;
3953 pCtx->r11 = pCtx->rflags.u;
3954 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3955 pCtx->rflags.u |= X86_EFL_1;
3956
3957 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3958 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3959 }
3960 else
3961 {
3962 /*
3963 * Commit it.
3964 */
3965 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3966 pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3967 pCtx->rcx = pCtx->eip + cbInstr;
3968 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3969 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3970
3971 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3972 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3973 }
3974 pCtx->cs.Sel = uNewCs;
3975 pCtx->cs.ValidSel = uNewCs;
3976 pCtx->cs.u64Base = 0;
3977 pCtx->cs.u32Limit = UINT32_MAX;
3978 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3979
3980 pCtx->ss.Sel = uNewSs;
3981 pCtx->ss.ValidSel = uNewSs;
3982 pCtx->ss.u64Base = 0;
3983 pCtx->ss.u32Limit = UINT32_MAX;
3984 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3985
3986 /* Flush the prefetch buffer. */
3987#ifdef IEM_WITH_CODE_TLB
3988 pVCpu->iem.s.pbInstrBuf = NULL;
3989#else
3990 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3991#endif
3992
3993 return VINF_SUCCESS;
3994}
3995
3996
3997/**
3998 * Implements SYSRET (AMD and Intel64).
3999 */
4000IEM_CIMPL_DEF_0(iemCImpl_sysret)
4002{
4003 RT_NOREF_PV(cbInstr);
4004 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4005
4006 /*
4007 * Check preconditions.
4008 *
4009 * Note that the documentation suggests real CPUs may load a few odd values
4010 * into CS and SS beyond what we allow for here. This has yet to be checked
4011 * on real hardware.
4012 */
4013 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
4014 {
4015 Log(("sysret: Not enabled in EFER -> #UD\n"));
4016 return iemRaiseUndefinedOpcode(pVCpu);
4017 }
4018 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
4019 {
4020 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4021 return iemRaiseUndefinedOpcode(pVCpu);
4022 }
4023 if (!(pCtx->cr0 & X86_CR0_PE))
4024 {
4025 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4026 return iemRaiseGeneralProtectionFault0(pVCpu);
4027 }
4028 if (pVCpu->iem.s.uCpl != 0)
4029 {
4030 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4031 return iemRaiseGeneralProtectionFault0(pVCpu);
4032 }
4033
4034 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
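 /* SYSRET takes CS from STAR[63:48] (plus 16 when returning to 64-bit code)
    and SS from STAR[63:48] + 8; both selectors get RPL 3 further down. */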
4035 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4036 uint16_t uNewSs = uNewCs + 8;
4037 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4038 uNewCs += 16;
4039 if (uNewCs == 0 || uNewSs == 0)
4040 {
4041 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4042 return iemRaiseGeneralProtectionFault0(pVCpu);
4043 }
4044
4045 /*
4046 * Commit it.
4047 */
4048 if (CPUMIsGuestInLongModeEx(pCtx))
4049 {
4050 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4051 {
4052 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
4053 pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
4054 /* Note! We disregard the intel manual regarding the RCX canonical
4055 check, ask intel+xen why AMD doesn't do it. */
4056 pCtx->rip = pCtx->rcx;
4057 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4058 | (3 << X86DESCATTR_DPL_SHIFT);
4059 }
4060 else
4061 {
4062 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
4063 pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
4064 pCtx->rip = pCtx->ecx;
4065 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4066 | (3 << X86DESCATTR_DPL_SHIFT);
4067 }
4068 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4069 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
4070 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4071 pCtx->rflags.u |= X86_EFL_1;
4072 }
4073 else
4074 {
4075 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
4076 pCtx->rip = pCtx->rcx;
4077 pCtx->rflags.u |= X86_EFL_IF;
4078 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4079 | (3 << X86DESCATTR_DPL_SHIFT);
4080 }
4081 pCtx->cs.Sel = uNewCs | 3;
4082 pCtx->cs.ValidSel = uNewCs | 3;
4083 pCtx->cs.u64Base = 0;
4084 pCtx->cs.u32Limit = UINT32_MAX;
4085 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4086
4087 pCtx->ss.Sel = uNewSs | 3;
4088 pCtx->ss.ValidSel = uNewSs | 3;
4089 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4090 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4091 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4092 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4093 * on sysret. */
4094
4095 /* Flush the prefetch buffer. */
4096#ifdef IEM_WITH_CODE_TLB
4097 pVCpu->iem.s.pbInstrBuf = NULL;
4098#else
4099 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4100#endif
4101
4102 return VINF_SUCCESS;
4103}
4104
4105
4106/**
4107 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4108 *
4109 * @param iSegReg The segment register number (valid).
4110 * @param uSel The new selector value.
4111 */
4112IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4113{
4114 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4115 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4116 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4117
4118 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4119
4120 /*
4121 * Real mode and V8086 mode are easy.
4122 */
4123 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
4124 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
4125 {
4126 *pSel = uSel;
4127 pHid->u64Base = (uint32_t)uSel << 4;
4128 pHid->ValidSel = uSel;
4129 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4130#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4131 /** @todo Does the CPU actually load limits and attributes in the
4132 * real/V8086 mode segment load case? It doesn't for CS in far
4133 * jumps... Affects unreal mode. */
4134 pHid->u32Limit = 0xffff;
4135 pHid->Attr.u = 0;
4136 pHid->Attr.n.u1Present = 1;
4137 pHid->Attr.n.u1DescType = 1;
4138 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4139 ? X86_SEL_TYPE_RW
4140 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4141#endif
4142 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4143 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4144 return VINF_SUCCESS;
4145 }
4146
4147 /*
4148 * Protected mode.
4149 *
4150 * Check if it's a null segment selector value first, that's OK for DS, ES,
4151 * FS and GS. If not null, then we have to load and parse the descriptor.
4152 */
4153 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4154 {
4155 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4156 if (iSegReg == X86_SREG_SS)
4157 {
4158 /* In 64-bit kernel mode, the stack can be 0 because of the way
4159 interrupts are dispatched. AMD seems to have a slightly more
4160 relaxed relationship to SS.RPL than intel does. */
4161 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4162 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4163 || pVCpu->iem.s.uCpl > 2
4164 || ( uSel != pVCpu->iem.s.uCpl
4165 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4166 {
4167 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4168 return iemRaiseGeneralProtectionFault0(pVCpu);
4169 }
4170 }
4171
4172 *pSel = uSel; /* Not RPL, remember :-) */
4173 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4174 if (iSegReg == X86_SREG_SS)
4175 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4176
4177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4178 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4179
4180 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4181 return VINF_SUCCESS;
4182 }
4183
4184 /* Fetch the descriptor. */
4185 IEMSELDESC Desc;
4186 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4187 if (rcStrict != VINF_SUCCESS)
4188 return rcStrict;
4189
4190 /* Check GPs first. */
4191 if (!Desc.Legacy.Gen.u1DescType)
4192 {
4193 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4194 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4195 }
4196 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4197 {
4198 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4199 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4200 {
4201 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4202 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4203 }
4204 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4205 {
4206 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4207 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4208 }
4209 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4210 {
4211 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4212 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4213 }
4214 }
4215 else
4216 {
4217 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4218 {
4219 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4220 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4221 }
4222 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4223 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4224 {
4225#if 0 /* this is what intel says. */
4226 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4227 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4228 {
4229 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4230 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4231 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4232 }
4233#else /* this is what makes more sense. */
4234 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4235 {
4236 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4237 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4238 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4239 }
4240 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4241 {
4242 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4243 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4244 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4245 }
4246#endif
4247 }
4248 }
4249
4250 /* Is it there? */
4251 if (!Desc.Legacy.Gen.u1Present)
4252 {
4253 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4254 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4255 }
4256
4257 /* The base and limit. */
4258 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4259 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4260
4261 /*
4262 * Ok, everything checked out fine. Now set the accessed bit before
4263 * committing the result into the registers.
4264 */
4265 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4266 {
4267 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4268 if (rcStrict != VINF_SUCCESS)
4269 return rcStrict;
4270 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4271 }
4272
4273 /* commit */
4274 *pSel = uSel;
4275 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4276 pHid->u32Limit = cbLimit;
4277 pHid->u64Base = u64Base;
4278 pHid->ValidSel = uSel;
4279 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4280
4281 /** @todo check if the hidden bits are loaded correctly for 64-bit
4282 * mode. */
4283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4284
4285 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4286 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4287 return VINF_SUCCESS;
4288}
4289
4290
4291/**
4292 * Implements 'mov SReg, r/m'.
4293 *
4294 * @param iSegReg The segment register number (valid).
4295 * @param uSel The new selector value.
4296 */
4297IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4298{
4299 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4300 if (rcStrict == VINF_SUCCESS)
4301 {
4302 if (iSegReg == X86_SREG_SS)
4303 {
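 /* Loading SS inhibits interrupts and debug exceptions until the next
    instruction has completed, so record the interrupt shadow here. */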
4304 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4305 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4306 }
4307 }
4308 return rcStrict;
4309}
4310
4311
4312/**
4313 * Implements 'pop SReg'.
4314 *
4315 * @param iSegReg The segment register number (valid).
4316 * @param enmEffOpSize The effective operand size (valid).
4317 */
4318IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4319{
4320 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4321 VBOXSTRICTRC rcStrict;
4322
4323 /*
4324 * Read the selector off the stack and join paths with mov ss, reg.
4325 */
4326 RTUINT64U TmpRsp;
4327 TmpRsp.u = pCtx->rsp;
4328 switch (enmEffOpSize)
4329 {
4330 case IEMMODE_16BIT:
4331 {
4332 uint16_t uSel;
4333 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4334 if (rcStrict == VINF_SUCCESS)
4335 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4336 break;
4337 }
4338
4339 case IEMMODE_32BIT:
4340 {
4341 uint32_t u32Value;
4342 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4343 if (rcStrict == VINF_SUCCESS)
4344 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4345 break;
4346 }
4347
4348 case IEMMODE_64BIT:
4349 {
4350 uint64_t u64Value;
4351 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4352 if (rcStrict == VINF_SUCCESS)
4353 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4354 break;
4355 }
4356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4357 }
4358
4359 /*
4360 * Commit the stack on success.
4361 */
4362 if (rcStrict == VINF_SUCCESS)
4363 {
4364 pCtx->rsp = TmpRsp.u;
4365 if (iSegReg == X86_SREG_SS)
4366 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4367 }
4368 return rcStrict;
4369}
4370
4371
4372/**
4373 * Implements lgs, lfs, les, lds & lss.
4374 */
4375IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4376 uint16_t, uSel,
4377 uint64_t, offSeg,
4378 uint8_t, iSegReg,
4379 uint8_t, iGReg,
4380 IEMMODE, enmEffOpSize)
4381{
4382 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4383 VBOXSTRICTRC rcStrict;
4384
4385 /*
4386 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4387 */
4388 /** @todo verify and test that mov, pop and lXs all do the segment
4389 * register loading in the exact same way. */
4390 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4391 if (rcStrict == VINF_SUCCESS)
4392 {
4393 switch (enmEffOpSize)
4394 {
4395 case IEMMODE_16BIT:
4396 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4397 break;
4398 case IEMMODE_32BIT:
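 /* Intentionally a 64-bit store: writing a 32-bit GPR zero extends to
    64 bits, and offSeg should only hold a 32-bit offset in this case. */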
4399 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4400 break;
4401 case IEMMODE_64BIT:
4402 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4403 break;
4404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4405 }
4406 }
4407
4408 return rcStrict;
4409}
4410
4411
4412/**
4413 * Helper for VERR, VERW, LAR, and LSL that loads the descriptor from memory.
4414 *
4415 * @retval VINF_SUCCESS on success.
4416 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4417 * @retval iemMemFetchSysU64 return value.
4418 *
4419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4420 * @param uSel The selector value.
4421 * @param fAllowSysDesc Whether system descriptors are OK or not.
4422 * @param pDesc Where to return the descriptor on success.
4423 */
4424static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPU pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4425{
4426 pDesc->Long.au64[0] = 0;
4427 pDesc->Long.au64[1] = 0;
4428
4429 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4430 return VINF_IEM_SELECTOR_NOT_OK;
4431
4432 /* Within the table limits? */
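 /* OR-ing in the RPL and TI bits yields the offset of the last byte of the
    8-byte descriptor, which must not exceed the (inclusive) table limit. */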
4433 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4434 RTGCPTR GCPtrBase;
4435 if (uSel & X86_SEL_LDT)
4436 {
4437 if ( !pCtx->ldtr.Attr.n.u1Present
4438 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4439 return VINF_IEM_SELECTOR_NOT_OK;
4440 GCPtrBase = pCtx->ldtr.u64Base;
4441 }
4442 else
4443 {
4444 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4445 return VINF_IEM_SELECTOR_NOT_OK;
4446 GCPtrBase = pCtx->gdtr.pGdt;
4447 }
4448
4449 /* Fetch the descriptor. */
4450 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4451 if (rcStrict != VINF_SUCCESS)
4452 return rcStrict;
4453 if (!pDesc->Legacy.Gen.u1DescType)
4454 {
4455 if (!fAllowSysDesc)
4456 return VINF_IEM_SELECTOR_NOT_OK;
4457 if (CPUMIsGuestInLongModeEx(pCtx))
4458 {
4459 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4460 if (rcStrict != VINF_SUCCESS)
4461 return rcStrict;
4462 }
4463
4464 }
4465
4466 return VINF_SUCCESS;
4467}
4468
4469
4470/**
4471 * Implements verr (fWrite = false) and verw (fWrite = true).
4472 */
4473IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4474{
4475 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4476
4477 /** @todo figure whether the accessed bit is set or not. */
4478
4479 bool fAccessible = true;
4480 IEMSELDESC Desc;
4481 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4482 if (rcStrict == VINF_SUCCESS)
4483 {
4484 /* Check the descriptor, order doesn't matter much here. */
4485 if ( !Desc.Legacy.Gen.u1DescType
4486 || !Desc.Legacy.Gen.u1Present)
4487 fAccessible = false;
4488 else
4489 {
4490 if ( fWrite
4491 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4492 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4493 fAccessible = false;
4494
4495 /** @todo testcase for the conforming behavior. */
4496 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4497 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4498 {
4499 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4500 fAccessible = false;
4501 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4502 fAccessible = false;
4503 }
4504 }
4505
4506 }
4507 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4508 fAccessible = false;
4509 else
4510 return rcStrict;
4511
4512 /* commit */
4513 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fAccessible;
4514
4515 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4516 return VINF_SUCCESS;
4517}
4518
4519
4520/**
4521 * Implements LAR and LSL with 64-bit operand size.
4522 *
4523 * @returns VINF_SUCCESS.
4524 * @param pu64Dst Pointer to the destination register.
4525 * @param uSel The selector to load details for.
4526 * @param fIsLar true = LAR, false = LSL.
4527 */
4528IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4529{
4530 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4531
4532 /** @todo figure whether the accessed bit is set or not. */
4533
4534 bool fDescOk = true;
4535 IEMSELDESC Desc;
4536 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4537 if (rcStrict == VINF_SUCCESS)
4538 {
4539 /*
4540 * Check the descriptor type.
4541 */
4542 if (!Desc.Legacy.Gen.u1DescType)
4543 {
4544 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4545 {
4546 if (Desc.Long.Gen.u5Zeros)
4547 fDescOk = false;
4548 else
4549 switch (Desc.Long.Gen.u4Type)
4550 {
4551 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4552 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4553 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4554 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4555 break;
4556 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4557 fDescOk = fIsLar;
4558 break;
4559 default:
4560 fDescOk = false;
4561 break;
4562 }
4563 }
4564 else
4565 {
4566 switch (Desc.Long.Gen.u4Type)
4567 {
4568 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4569 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4570 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4571 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4572 case X86_SEL_TYPE_SYS_LDT:
4573 break;
4574 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4575 case X86_SEL_TYPE_SYS_TASK_GATE:
4576 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4577 fDescOk = fIsLar;
4578 break;
4579 default:
4580 fDescOk = false;
4581 break;
4582 }
4583 }
4584 }
4585 if (fDescOk)
4586 {
4587 /*
4588 * Check the RPL/DPL/CPL interaction..
4589 */
4590 /** @todo testcase for the conforming behavior. */
4591 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4592 || !Desc.Legacy.Gen.u1DescType)
4593 {
4594 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4595 fDescOk = false;
4596 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4597 fDescOk = false;
4598 }
4599 }
4600
4601 if (fDescOk)
4602 {
4603 /*
4604 * All fine, start committing the result.
4605 */
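 /* LAR returns the attribute bytes of the descriptor (second dword masked
    with 0x00ffff00), LSL the granularity-expanded segment limit. */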
4606 if (fIsLar)
4607 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4608 else
4609 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4610 }
4611
4612 }
4613 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4614 fDescOk = false;
4615 else
4616 return rcStrict;
4617
4618 /* commit flags value and advance rip. */
4619 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fDescOk;
4620 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4621
4622 return VINF_SUCCESS;
4623}
4624
4625
4626/**
4627 * Implements LAR and LSL with 16-bit operand size.
4628 *
4629 * @returns VINF_SUCCESS.
4630 * @param pu16Dst Pointer to the destination register.
4631 * @param uSel The selector to load details for.
4632 * @param fIsLar true = LAR, false = LSL.
4633 */
4634IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
4635{
4636 uint64_t u64TmpDst = *pu16Dst;
4637 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
4638 *pu16Dst = u64TmpDst;
4639 return VINF_SUCCESS;
4640}
4641
4642
4643/**
4644 * Implements lgdt.
4645 *
4646 * @param iEffSeg The segment of the new gdtr contents
4647 * @param GCPtrEffSrc The address of the new gdtr contents.
4648 * @param enmEffOpSize The effective operand size.
4649 */
4650IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4651{
4652 if (pVCpu->iem.s.uCpl != 0)
4653 return iemRaiseGeneralProtectionFault0(pVCpu);
4654 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4655
4656 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
4657 {
4658 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
4659 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4660 }
4661
4662 /*
4663 * Fetch the limit and base address.
4664 */
4665 uint16_t cbLimit;
4666 RTGCPTR GCPtrBase;
4667 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4668 if (rcStrict == VINF_SUCCESS)
4669 {
4670 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4671 || X86_IS_CANONICAL(GCPtrBase))
4672 {
4673 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4674 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4675 else
4676 {
4677 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4678 pCtx->gdtr.cbGdt = cbLimit;
4679 pCtx->gdtr.pGdt = GCPtrBase;
4680 }
4681 if (rcStrict == VINF_SUCCESS)
4682 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4683 }
4684 else
4685 {
4686 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4687 return iemRaiseGeneralProtectionFault0(pVCpu);
4688 }
4689 }
4690 return rcStrict;
4691}
4692
4693
4694/**
4695 * Implements sgdt.
4696 *
4697 * @param iEffSeg The segment where to store the gdtr content.
4698 * @param GCPtrEffDst The address where to store the gdtr content.
4699 */
4700IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4701{
4702 /*
4703 * Join paths with sidt.
4704 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4705 * you really must know.
4706 */
4707 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4708 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4709 if (rcStrict == VINF_SUCCESS)
4710 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4711 return rcStrict;
4712}
4713
4714
4715/**
4716 * Implements lidt.
4717 *
4718 * @param iEffSeg The segment of the new idtr contents
4719 * @param GCPtrEffSrc The address of the new idtr contents.
4720 * @param enmEffOpSize The effective operand size.
4721 */
4722IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4723{
4724 if (pVCpu->iem.s.uCpl != 0)
4725 return iemRaiseGeneralProtectionFault0(pVCpu);
4726 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4727
4728 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
4729 {
4730 Log(("lidt: Guest intercept -> #VMEXIT\n"));
4731 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4732 }
4733
4734 /*
4735 * Fetch the limit and base address.
4736 */
4737 uint16_t cbLimit;
4738 RTGCPTR GCPtrBase;
4739 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4740 if (rcStrict == VINF_SUCCESS)
4741 {
4742 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4743 || X86_IS_CANONICAL(GCPtrBase))
4744 {
4745 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4746 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4747 else
4748 {
4749 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4750 pCtx->idtr.cbIdt = cbLimit;
4751 pCtx->idtr.pIdt = GCPtrBase;
4752 }
4753 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4754 }
4755 else
4756 {
4757 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4758 return iemRaiseGeneralProtectionFault0(pVCpu);
4759 }
4760 }
4761 return rcStrict;
4762}
4763
4764
4765/**
4766 * Implements sidt.
4767 *
4768 * @param iEffSeg The segment where to store the idtr content.
4769 * @param GCPtrEffDst The address where to store the idtr content.
4770 */
4771IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4772{
4773 /*
4774 * Join paths with sgdt.
4775 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4776 * you really must know.
4777 */
4778 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4779 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4780 if (rcStrict == VINF_SUCCESS)
4781 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4782 return rcStrict;
4783}
4784
4785
4786/**
4787 * Implements lldt.
4788 *
4789 * @param uNewLdt The new LDT selector value.
4790 */
4791IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4792{
4793 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4794
4795 /*
4796 * Check preconditions.
4797 */
4798 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4799 {
4800 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4801 return iemRaiseUndefinedOpcode(pVCpu);
4802 }
4803 if (pVCpu->iem.s.uCpl != 0)
4804 {
4805 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
4806 return iemRaiseGeneralProtectionFault0(pVCpu);
4807 }
4808 if (uNewLdt & X86_SEL_LDT)
4809 {
4810 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4811 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
4812 }
4813
4814 /*
4815 * Now, loading a NULL selector is easy.
4816 */
4817 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4818 {
4819 /* Nested-guest SVM intercept. */
4820 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4821 {
4822 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4823 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4824 }
4825
4826 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4827 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4828 CPUMSetGuestLDTR(pVCpu, uNewLdt);
4829 else
4830 pCtx->ldtr.Sel = uNewLdt;
4831 pCtx->ldtr.ValidSel = uNewLdt;
4832 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4833 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4834 {
4835 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4836 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4837 }
4838 else if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4839 {
4840 /* AMD-V seems to leave the base and limit alone. */
4841 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4842 }
4843 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4844 {
4845 /* VT-x (Intel 3960x) seems to be doing the following. */
4846 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4847 pCtx->ldtr.u64Base = 0;
4848 pCtx->ldtr.u32Limit = UINT32_MAX;
4849 }
4850
4851 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4852 return VINF_SUCCESS;
4853 }
4854
4855 /*
4856 * Read the descriptor.
4857 */
4858 IEMSELDESC Desc;
4859 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4860 if (rcStrict != VINF_SUCCESS)
4861 return rcStrict;
4862
4863 /* Check GPs first. */
4864 if (Desc.Legacy.Gen.u1DescType)
4865 {
4866 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4867 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4868 }
4869 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4870 {
4871 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4872 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4873 }
4874 uint64_t u64Base;
4875 if (!IEM_IS_LONG_MODE(pVCpu))
4876 u64Base = X86DESC_BASE(&Desc.Legacy);
4877 else
4878 {
4879 if (Desc.Long.Gen.u5Zeros)
4880 {
4881 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4882 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4883 }
4884
4885 u64Base = X86DESC64_BASE(&Desc.Long);
4886 if (!IEM_IS_CANONICAL(u64Base))
4887 {
4888 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4889 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4890 }
4891 }
4892
4893 /* NP */
4894 if (!Desc.Legacy.Gen.u1Present)
4895 {
4896 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4897 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
4898 }
4899
4900 /* Nested-guest SVM intercept. */
4901 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4902 {
4903 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4904 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4905 }
4906
4907 /*
4908 * It checks out alright, update the registers.
4909 */
4910/** @todo check if the actual value is loaded or if the RPL is dropped */
4911 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4912 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4913 else
4914 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4915 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4916 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4917 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4918 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4919 pCtx->ldtr.u64Base = u64Base;
4920
4921 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4922 return VINF_SUCCESS;
4923}
4924
4925
4926/**
4927 * Implements ltr.
4928 *
4929 * @param uNewTr The new task register (TR) selector value.
4930 */
4931IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4932{
4933 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4934
4935 /*
4936 * Check preconditions.
4937 */
4938 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4939 {
4940 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
4941 return iemRaiseUndefinedOpcode(pVCpu);
4942 }
4943 if (pVCpu->iem.s.uCpl != 0)
4944 {
4945 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
4946 return iemRaiseGeneralProtectionFault0(pVCpu);
4947 }
4948 if (uNewTr & X86_SEL_LDT)
4949 {
4950 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4951 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
4952 }
4953 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4954 {
4955 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4956 return iemRaiseGeneralProtectionFault0(pVCpu);
4957 }
4958 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
4959 {
4960 Log(("ltr: Guest intercept -> #VMEXIT\n"));
4961 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4962 }
4963
4964 /*
4965 * Read the descriptor.
4966 */
4967 IEMSELDESC Desc;
4968 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4969 if (rcStrict != VINF_SUCCESS)
4970 return rcStrict;
4971
4972 /* Check GPs first. */
4973 if (Desc.Legacy.Gen.u1DescType)
4974 {
4975 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4976 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4977 }
4978 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
4979 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4980 || IEM_IS_LONG_MODE(pVCpu)) )
4981 {
4982 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
4983 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4984 }
4985 uint64_t u64Base;
4986 if (!IEM_IS_LONG_MODE(pVCpu))
4987 u64Base = X86DESC_BASE(&Desc.Legacy);
4988 else
4989 {
4990 if (Desc.Long.Gen.u5Zeros)
4991 {
4992 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
4993 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
4994 }
4995
4996 u64Base = X86DESC64_BASE(&Desc.Long);
4997 if (!IEM_IS_CANONICAL(u64Base))
4998 {
4999 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5000 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5001 }
5002 }
5003
5004 /* NP */
5005 if (!Desc.Legacy.Gen.u1Present)
5006 {
5007 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5008 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5009 }
5010
5011 /*
5012 * Set it busy.
5013 * Note! Intel says this should lock down the whole descriptor, but we'll
5014 * restrict ourselves to 32-bit for now due to lack of inline
5015 * assembly and such.
5016 */
5017 void *pvDesc;
5018 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
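 /* The busy bit is bit 41 of the descriptor (bit 1 of the type byte at
    offset 5); the switch below merely realigns the pointer to a dword
    boundary for ASMAtomicBitSet and adjusts the bit index accordingly. */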
5021 switch ((uintptr_t)pvDesc & 3)
5022 {
5023 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5024 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5025 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5026 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5027 }
5028 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5029 if (rcStrict != VINF_SUCCESS)
5030 return rcStrict;
5031 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5032
5033 /*
5034 * It checks out alright, update the registers.
5035 */
5036/** @todo check if the actual value is loaded or if the RPL is dropped */
5037 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5038 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5039 else
5040 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
5041 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5042 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
5043 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5044 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5045 pCtx->tr.u64Base = u64Base;
5046
5047 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5048 return VINF_SUCCESS;
5049}
5050
5051
5052/**
5053 * Implements mov GReg,CRx.
5054 *
5055 * @param iGReg The general register to store the CRx value in.
5056 * @param iCrReg The CRx register to read (valid).
5057 */
5058IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5059{
5060 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5061 if (pVCpu->iem.s.uCpl != 0)
5062 return iemRaiseGeneralProtectionFault0(pVCpu);
5063 Assert(!pCtx->eflags.Bits.u1VM);
5064
5065 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5066 {
5067 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5068 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5069 }
5070
5071 /* read it */
5072 uint64_t crX;
5073 switch (iCrReg)
5074 {
5075 case 0:
5076 crX = pCtx->cr0;
5077 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5078 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5079 break;
5080 case 2: crX = pCtx->cr2; break;
5081 case 3: crX = pCtx->cr3; break;
5082 case 4: crX = pCtx->cr4; break;
5083 case 8:
5084 {
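            /* CR8 returns bits 7:4 of the APIC TPR in its low nibble; it reads as 0 if the TPR cannot be queried. */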
5085 uint8_t uTpr;
5086 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5087 if (RT_SUCCESS(rc))
5088 crX = uTpr >> 4;
5089 else
5090 crX = 0;
5091 break;
5092 }
5093 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5094 }
5095
5096 /* store it */
5097 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5098 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5099 else
5100 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5101
5102 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5103 return VINF_SUCCESS;
5104}
5105
5106
5107/**
5108 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5109 *
5110 * @param iCrReg The CRx register to write (valid).
5111 * @param uNewCrX The new value.
5112 * @param enmAccessCrx The instruction that caused the CrX load.
5113 * @param iGReg The general register in case of a 'mov CRx,GReg'
5114 * instruction.
5115 */
5116IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5117{
5118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5119 VBOXSTRICTRC rcStrict;
5120 int rc;
5121#ifndef VBOX_WITH_NESTED_HWVIRT
5122 RT_NOREF2(iGReg, enmAccessCrX);
5123#endif
5124
5125 /*
5126 * Try to store it.
5127 * Unfortunately, CPUM only does a tiny bit of the work.
5128 */
5129 switch (iCrReg)
5130 {
5131 case 0:
5132 {
5133 /*
5134 * Perform checks.
5135 */
5136 uint64_t const uOldCrX = pCtx->cr0;
5137 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
5138 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
5139 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
5140
5141 /* ET is hardcoded on 486 and later. */
5142 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5143 uNewCrX |= X86_CR0_ET;
5144 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5145 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5146 {
5147 uNewCrX &= fValid;
5148 uNewCrX |= X86_CR0_ET;
5149 }
5150 else
5151 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5152
5153 /* Check for reserved bits. */
5154 if (uNewCrX & ~(uint64_t)fValid)
5155 {
5156 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5157 return iemRaiseGeneralProtectionFault0(pVCpu);
5158 }
5159
5160 /* Check for invalid combinations. */
5161 if ( (uNewCrX & X86_CR0_PG)
5162 && !(uNewCrX & X86_CR0_PE) )
5163 {
5164 Log(("Trying to set CR0.PG without CR0.PE\n"));
5165 return iemRaiseGeneralProtectionFault0(pVCpu);
5166 }
5167
5168 if ( !(uNewCrX & X86_CR0_CD)
5169 && (uNewCrX & X86_CR0_NW) )
5170 {
5171 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5172 return iemRaiseGeneralProtectionFault0(pVCpu);
5173 }
5174
5175 /* Long mode consistency checks. */
5176 if ( (uNewCrX & X86_CR0_PG)
5177 && !(uOldCrX & X86_CR0_PG)
5178 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5179 {
5180 if (!(pCtx->cr4 & X86_CR4_PAE))
5181 {
5182 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
5183 return iemRaiseGeneralProtectionFault0(pVCpu);
5184 }
5185 if (pCtx->cs.Attr.n.u1Long)
5186 {
5187 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
5188 return iemRaiseGeneralProtectionFault0(pVCpu);
5189 }
5190 }
5191
5192 /** @todo check reserved PDPTR bits as AMD states. */
5193
5194 /*
5195 * SVM nested-guest CR0 write intercepts.
5196 */
5197 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5198 {
5199 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5200 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5201 }
5202 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES))
5203 {
5204 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5205 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5206 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5207 {
5208 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5209 Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg));
5210 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5211 }
5212 }
5213
5214 /*
5215 * Change CR0.
5216 */
5217 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5218 CPUMSetGuestCR0(pVCpu, uNewCrX);
5219 else
5220 pCtx->cr0 = uNewCrX;
5221 Assert(pCtx->cr0 == uNewCrX);
5222
5223 /*
5224 * Change EFER.LMA if entering or leaving long mode.
5225 */
5226 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5227 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5228 {
5229 uint64_t NewEFER = pCtx->msrEFER;
5230 if (uNewCrX & X86_CR0_PG)
5231 NewEFER |= MSR_K6_EFER_LMA;
5232 else
5233 NewEFER &= ~MSR_K6_EFER_LMA;
5234
5235 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5236 CPUMSetGuestEFER(pVCpu, NewEFER);
5237 else
5238 pCtx->msrEFER = NewEFER;
5239 Assert(pCtx->msrEFER == NewEFER);
5240 }
5241
5242 /*
5243 * Inform PGM.
5244 */
5245 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5246 {
5247 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
5248 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
5249 {
5250 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5251 AssertRCReturn(rc, rc);
5252 /* ignore informational status codes */
5253 }
5254 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5255 }
5256 else
5257 rcStrict = VINF_SUCCESS;
5258
5259#ifdef IN_RC
5260 /* Return to ring-3 for rescheduling if WP or AM changes. */
5261 if ( rcStrict == VINF_SUCCESS
5262 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
5263 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
5264 rcStrict = VINF_EM_RESCHEDULE;
5265#endif
5266 break;
5267 }
5268
5269 /*
5270 * CR2 can be changed without any restrictions.
5271 */
5272 case 2:
5273 {
5274 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5275 {
5276 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5277 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5278 }
5279 pCtx->cr2 = uNewCrX;
5280 rcStrict = VINF_SUCCESS;
5281 break;
5282 }
5283
5284 /*
5285 * CR3 is relatively simple, although AMD and Intel have different
5286 * accounts of how setting reserved bits is handled. We take Intel's
5287 * word for the lower bits and AMD's for the high bits (63:52). The
5288 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5289 * on this.
5290 */
5291 /** @todo Testcase: Setting reserved bits in CR3, especially before
5292 * enabling paging. */
5293 case 3:
5294 {
5295 /* check / mask the value. */
5296 if (uNewCrX & UINT64_C(0xfff0000000000000))
5297 {
5298 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5299 return iemRaiseGeneralProtectionFault0(pVCpu);
5300 }
5301
5302 uint64_t fValid;
5303 if ( (pCtx->cr4 & X86_CR4_PAE)
5304 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5305 fValid = UINT64_C(0x000fffffffffffff);
5306 else
5307 fValid = UINT64_C(0xffffffff);
5308 if (uNewCrX & ~fValid)
5309 {
5310 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5311 uNewCrX, uNewCrX & ~fValid));
5312 uNewCrX &= fValid;
5313 }
5314
5315 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
5316 {
5317 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5318 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
5319 }
5320
5321 /** @todo If we're in PAE mode we should check the PDPTRs for
5322 * invalid bits. */
5323
5324 /* Make the change. */
5325 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5326 {
5327 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5328 AssertRCSuccessReturn(rc, rc);
5329 }
5330 else
5331 pCtx->cr3 = uNewCrX;
5332
5333 /* Inform PGM. */
5334 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5335 {
5336 if (pCtx->cr0 & X86_CR0_PG)
5337 {
5338 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5339 AssertRCReturn(rc, rc);
5340 /* ignore informational status codes */
5341 }
5342 }
5343 rcStrict = VINF_SUCCESS;
5344 break;
5345 }
5346
5347 /*
5348 * CR4 is a bit more tedious as there are bits which cannot be cleared
5349 * under some circumstances and such.
5350 */
5351 case 4:
5352 {
5353 uint64_t const uOldCrX = pCtx->cr4;
5354
5355 /** @todo Shouldn't this look at the guest CPUID bits to determine
5356 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5357 * should #GP(0). */
5358 /* reserved bits */
5359 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5360 | X86_CR4_TSD | X86_CR4_DE
5361 | X86_CR4_PSE | X86_CR4_PAE
5362 | X86_CR4_MCE | X86_CR4_PGE
5363 | X86_CR4_PCE | X86_CR4_OSFXSR
5364 | X86_CR4_OSXMMEEXCPT;
5365 //if (xxx)
5366 // fValid |= X86_CR4_VMXE;
5367 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
5368 fValid |= X86_CR4_OSXSAVE;
5369 if (uNewCrX & ~(uint64_t)fValid)
5370 {
5371 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5372 return iemRaiseGeneralProtectionFault0(pVCpu);
5373 }
5374
5375 /* long mode checks. */
5376 if ( (uOldCrX & X86_CR4_PAE)
5377 && !(uNewCrX & X86_CR4_PAE)
5378 && CPUMIsGuestInLongModeEx(pCtx) )
5379 {
5380 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
5381 return iemRaiseGeneralProtectionFault0(pVCpu);
5382 }
5383
5384 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
5385 {
5386 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5387 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
5388 }
5389
5390 /*
5391 * Change it.
5392 */
5393 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5394 {
5395 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5396 AssertRCSuccessReturn(rc, rc);
5397 }
5398 else
5399 pCtx->cr4 = uNewCrX;
5400 Assert(pCtx->cr4 == uNewCrX);
5401
5402 /*
5403 * Notify SELM and PGM.
5404 */
5405 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5406 {
5407 /* SELM - VME may change things wrt the TSS shadowing. */
5408 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5409 {
5410 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5411 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5412#ifdef VBOX_WITH_RAW_MODE
5413 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
5414 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5415#endif
5416 }
5417
5418 /* PGM - flushing and mode. */
5419 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
5420 {
5421 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5422 AssertRCReturn(rc, rc);
5423 /* ignore informational status codes */
5424 }
5425 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5426 }
5427 else
5428 rcStrict = VINF_SUCCESS;
5429 break;
5430 }
5431
5432 /*
5433 * CR8 maps to the APIC TPR.
5434 */
5435 case 8:
5436 if (uNewCrX & ~(uint64_t)0xf)
5437 {
5438 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5439 return iemRaiseGeneralProtectionFault0(pVCpu);
5440 }
5441
5442 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
5443 {
5444 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5445 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
5446 }
5447
5448 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5449 APICSetTpr(pVCpu, (uint8_t)uNewCrX << 4);
5450 rcStrict = VINF_SUCCESS;
5451 break;
5452
5453 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5454 }
5455
5456 /*
5457 * Advance the RIP on success.
5458 */
5459 if (RT_SUCCESS(rcStrict))
5460 {
5461 if (rcStrict != VINF_SUCCESS)
5462 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5463 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5464 }
5465
5466 return rcStrict;
5467}
5468
5469
5470/**
5471 * Implements mov CRx,GReg.
5472 *
5473 * @param iCrReg The CRx register to write (valid).
5474 * @param iGReg The general register to load the CRx value from.
5475 */
5476IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5477{
5478 if (pVCpu->iem.s.uCpl != 0)
5479 return iemRaiseGeneralProtectionFault0(pVCpu);
5480 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5481
5482 /*
5483 * Read the new value from the source register and call common worker.
5484 */
5485 uint64_t uNewCrX;
5486 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5487 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
5488 else
5489 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
5490 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
5491}
5492
5493
5494/**
5495 * Implements 'LMSW r/m16'
5496 *
5497 * @param u16NewMsw The new value.
5498 */
5499IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5500{
5501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5502
5503 if (pVCpu->iem.s.uCpl != 0)
5504 return iemRaiseGeneralProtectionFault0(pVCpu);
5505 Assert(!pCtx->eflags.Bits.u1VM);
5506
5507 /*
5508 * Compose the new CR0 value and call common worker.
5509 */
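    /* Note: only PE, MP, EM and TS are affected, and since PE is not masked out of the old
       value, LMSW can set but never clear CR0.PE. */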
5510 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5511 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5512 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
5513}
5514
5515
5516/**
5517 * Implements 'CLTS'.
5518 */
5519IEM_CIMPL_DEF_0(iemCImpl_clts)
5520{
5521 if (pVCpu->iem.s.uCpl != 0)
5522 return iemRaiseGeneralProtectionFault0(pVCpu);
5523
5524 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5525 uint64_t uNewCr0 = pCtx->cr0;
5526 uNewCr0 &= ~X86_CR0_TS;
5527 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
5528}
5529
5530
5531/**
5532 * Implements mov GReg,DRx.
5533 *
5534 * @param iGReg The general register to store the DRx value in.
5535 * @param iDrReg The DRx register to read (0-7).
5536 */
5537IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5538{
5539 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5540
5541 /*
5542 * Check preconditions.
5543 */
5544
5545 /* Raise GPs. */
5546 if (pVCpu->iem.s.uCpl != 0)
5547 return iemRaiseGeneralProtectionFault0(pVCpu);
5548 Assert(!pCtx->eflags.Bits.u1VM);
5549
5550 if ( (iDrReg == 4 || iDrReg == 5)
5551 && (pCtx->cr4 & X86_CR4_DE) )
5552 {
5553 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5554 return iemRaiseGeneralProtectionFault0(pVCpu);
5555 }
5556
5557 /* Raise #DB if general access detect is enabled. */
5558 if (pCtx->dr[7] & X86_DR7_GD)
5559 {
5560 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5561 return iemRaiseDebugException(pVCpu);
5562 }
5563
5564 /*
5565 * Read the debug register and store it in the specified general register.
5566 */
5567 uint64_t drX;
5568 switch (iDrReg)
5569 {
5570 case 0: drX = pCtx->dr[0]; break;
5571 case 1: drX = pCtx->dr[1]; break;
5572 case 2: drX = pCtx->dr[2]; break;
5573 case 3: drX = pCtx->dr[3]; break;
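        /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1 case was rejected above). */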
5574 case 6:
5575 case 4:
5576 drX = pCtx->dr[6];
5577 drX |= X86_DR6_RA1_MASK;
5578 drX &= ~X86_DR6_RAZ_MASK;
5579 break;
5580 case 7:
5581 case 5:
5582 drX = pCtx->dr[7];
5583 drX |= X86_DR7_RA1_MASK;
5584 drX &= ~X86_DR7_RAZ_MASK;
5585 break;
5586 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5587 }
5588
5589 /** @todo SVM nested-guest intercept for DR8-DR15? */
5590 /*
5591 * Check for any SVM nested-guest intercepts for the DRx read.
5592 */
5593 if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
5594 {
5595 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
5596 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
5597 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5598 }
5599
5600 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5601 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
5602 else
5603 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
5604
5605 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5606 return VINF_SUCCESS;
5607}
5608
5609
5610/**
5611 * Implements mov DRx,GReg.
5612 *
5613 * @param iDrReg The DRx register to write (valid).
5614 * @param iGReg The general register to load the DRx value from.
5615 */
5616IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5617{
5618 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5619
5620 /*
5621 * Check preconditions.
5622 */
5623 if (pVCpu->iem.s.uCpl != 0)
5624 return iemRaiseGeneralProtectionFault0(pVCpu);
5625 Assert(!pCtx->eflags.Bits.u1VM);
5626
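    /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear; remap them below so the rest of
       the code only has to deal with DR6 and DR7. */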
5627 if (iDrReg == 4 || iDrReg == 5)
5628 {
5629 if (pCtx->cr4 & X86_CR4_DE)
5630 {
5631 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5632 return iemRaiseGeneralProtectionFault0(pVCpu);
5633 }
5634 iDrReg += 2;
5635 }
5636
5637 /* Raise #DB if general access detect is enabled. */
5638 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
5639 * \#GP? */
5640 if (pCtx->dr[7] & X86_DR7_GD)
5641 {
5642 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5643 return iemRaiseDebugException(pVCpu);
5644 }
5645
5646 /*
5647 * Read the new value from the source register.
5648 */
5649 uint64_t uNewDrX;
5650 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5651 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
5652 else
5653 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
5654
5655 /*
5656 * Adjust it.
5657 */
5658 switch (iDrReg)
5659 {
5660 case 0:
5661 case 1:
5662 case 2:
5663 case 3:
5664 /* nothing to adjust */
5665 break;
5666
5667 case 6:
5668 if (uNewDrX & X86_DR6_MBZ_MASK)
5669 {
5670 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5671 return iemRaiseGeneralProtectionFault0(pVCpu);
5672 }
5673 uNewDrX |= X86_DR6_RA1_MASK;
5674 uNewDrX &= ~X86_DR6_RAZ_MASK;
5675 break;
5676
5677 case 7:
5678 if (uNewDrX & X86_DR7_MBZ_MASK)
5679 {
5680 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5681 return iemRaiseGeneralProtectionFault0(pVCpu);
5682 }
5683 uNewDrX |= X86_DR7_RA1_MASK;
5684 uNewDrX &= ~X86_DR7_RAZ_MASK;
5685 break;
5686
5687 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5688 }
5689
5690 /** @todo SVM nested-guest intercept for DR8-DR15? */
5691 /*
5692 * Check for any SVM nested-guest intercepts for the DRx write.
5693 */
5694 if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
5695 {
5696 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
5697 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
5698 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5699 }
5700
5701 /*
5702 * Do the actual setting.
5703 */
5704 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5705 {
5706 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
5707 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5708 }
5709 else
5710 pCtx->dr[iDrReg] = uNewDrX;
5711
5712 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5713 return VINF_SUCCESS;
5714}
5715
5716
5717/**
5718 * Implements 'INVLPG m'.
5719 *
5720 * @param GCPtrPage The effective address of the page to invalidate.
5721 * @remarks Updates the RIP.
5722 */
5723IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5724{
5725 /* ring-0 only. */
5726 if (pVCpu->iem.s.uCpl != 0)
5727 return iemRaiseGeneralProtectionFault0(pVCpu);
5728 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5729
5730 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
5731 {
5732 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
5733 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
5734 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
5735 }
5736
5737 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
5738 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5739
5740 if (rc == VINF_SUCCESS)
5741 return VINF_SUCCESS;
5742 if (rc == VINF_PGM_SYNC_CR3)
5743 return iemSetPassUpStatus(pVCpu, rc);
5744
5745 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5746 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5747 return rc;
5748}
5749
5750
5751/**
5752 * Implements RDTSC.
5753 */
5754IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5755{
5756 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5757
5758 /*
5759 * Check preconditions.
5760 */
5761 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
5762 return iemRaiseUndefinedOpcode(pVCpu);
5763
5764 if ( (pCtx->cr4 & X86_CR4_TSD)
5765 && pVCpu->iem.s.uCpl != 0)
5766 {
5767 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5768 return iemRaiseGeneralProtectionFault0(pVCpu);
5769 }
5770
5771 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
5772 {
5773 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
5774 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5775 }
5776
5777 /*
5778 * Do the job.
5779 */
5780 uint64_t uTicks = TMCpuTickGet(pVCpu);
5781 pCtx->rax = RT_LO_U32(uTicks);
5782 pCtx->rdx = RT_HI_U32(uTicks);
5783#ifdef IEM_VERIFICATION_MODE_FULL
5784 pVCpu->iem.s.fIgnoreRaxRdx = true;
5785#endif
5786
5787 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5788 return VINF_SUCCESS;
5789}
5790
5791
5792/**
5793 * Implements RDTSCP.
5794 */
5795IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
5796{
5797 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5798
5799 /*
5800 * Check preconditions.
5801 */
5802 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
5803 return iemRaiseUndefinedOpcode(pVCpu);
5804
5805 if ( (pCtx->cr4 & X86_CR4_TSD)
5806 && pVCpu->iem.s.uCpl != 0)
5807 {
5808 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5809 return iemRaiseGeneralProtectionFault0(pVCpu);
5810 }
5811
5812 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
5813 {
5814 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
5815 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5816 }
5817
5818 /*
5819 * Do the job.
5820 * Query the MSR first in case of trips to ring-3.
5821 */
5822 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
5823 if (rcStrict == VINF_SUCCESS)
5824 {
5825 /* Low dword of the TSC_AUX msr only. */
5826 pCtx->rcx &= UINT32_C(0xffffffff);
5827
5828 uint64_t uTicks = TMCpuTickGet(pVCpu);
5829 pCtx->rax = RT_LO_U32(uTicks);
5830 pCtx->rdx = RT_HI_U32(uTicks);
5831#ifdef IEM_VERIFICATION_MODE_FULL
5832 pVCpu->iem.s.fIgnoreRaxRdx = true;
5833#endif
5834 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5835 }
5836 return rcStrict;
5837}
5838
5839
5840/**
5841 * Implements RDPMC.
5842 */
5843IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
5844{
5845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5846 if ( pVCpu->iem.s.uCpl != 0
5847 && !(pCtx->cr4 & X86_CR4_PCE))
5848 return iemRaiseGeneralProtectionFault0(pVCpu);
5849
5850 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
5851 {
5852 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
5853 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5854 }
5855
5856 /** @todo Implement RDPMC for the regular guest execution case (the above only
5857 * handles nested-guest intercepts). */
5858 RT_NOREF(cbInstr);
5859 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
5860}
5861
5862
5863/**
5864 * Implements RDMSR.
5865 */
5866IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
5867{
5868 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5869
5870 /*
5871 * Check preconditions.
5872 */
5873 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
5874 return iemRaiseUndefinedOpcode(pVCpu);
5875 if (pVCpu->iem.s.uCpl != 0)
5876 return iemRaiseGeneralProtectionFault0(pVCpu);
5877
5878 /*
5879 * Do the job.
5880 */
5881 RTUINT64U uValue;
5882 VBOXSTRICTRC rcStrict;
5883 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
5884 {
5885 rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */);
5886 if (rcStrict == VINF_SVM_VMEXIT)
5887 return VINF_SUCCESS;
5888 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
5889 {
5890 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
5891 return rcStrict;
5892 }
5893 }
5894
5895 rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
5896 if (rcStrict == VINF_SUCCESS)
5897 {
5898 pCtx->rax = uValue.s.Lo;
5899 pCtx->rdx = uValue.s.Hi;
5900
5901 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5902 return VINF_SUCCESS;
5903 }
5904
5905#ifndef IN_RING3
5906 /* Deferred to ring-3. */
5907 if (rcStrict == VINF_CPUM_R3_MSR_READ)
5908 {
5909 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
5910 return rcStrict;
5911 }
5912#else /* IN_RING3 */
5913 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5914 static uint32_t s_cTimes = 0;
5915 if (s_cTimes++ < 10)
5916 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5917 else
5918#endif
5919 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
5920 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5921 return iemRaiseGeneralProtectionFault0(pVCpu);
5922}
5923
5924
5925/**
5926 * Implements WRMSR.
5927 */
5928IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
5929{
5930 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5931
5932 /*
5933 * Check preconditions.
5934 */
5935 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
5936 return iemRaiseUndefinedOpcode(pVCpu);
5937 if (pVCpu->iem.s.uCpl != 0)
5938 return iemRaiseGeneralProtectionFault0(pVCpu);
5939
5940 /*
5941 * Do the job.
5942 */
5943 RTUINT64U uValue;
5944 uValue.s.Lo = pCtx->eax;
5945 uValue.s.Hi = pCtx->edx;
5946
5947 VBOXSTRICTRC rcStrict;
5948 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
5949 {
5950 rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */);
5951 if (rcStrict == VINF_SVM_VMEXIT)
5952 return VINF_SUCCESS;
5953 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
5954 {
5955 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
5956 return rcStrict;
5957 }
5958 }
5959
5960 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5961 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
5962 else
5963 {
5964#ifdef IN_RING3
5965 CPUMCTX CtxTmp = *pCtx;
5966 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
5967 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
5968 *pCtx = *pCtx2;
5969 *pCtx2 = CtxTmp;
5970#else
5971 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
5972#endif
5973 }
5974 if (rcStrict == VINF_SUCCESS)
5975 {
5976 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5977 return VINF_SUCCESS;
5978 }
5979
5980#ifndef IN_RING3
5981 /* Deferred to ring-3. */
5982 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
5983 {
5984 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
5985 return rcStrict;
5986 }
5987#else /* IN_RING3 */
5988 /* Often an unimplemented MSR or MSR bit, so worth logging. */
5989 static uint32_t s_cTimes = 0;
5990 if (s_cTimes++ < 10)
5991 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5992 else
5993#endif
5994 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
5995 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
5996 return iemRaiseGeneralProtectionFault0(pVCpu);
5997}
5998
5999
6000/**
6001 * Implements 'IN eAX, port'.
6002 *
6003 * @param u16Port The source port.
6004 * @param cbReg The register size.
6005 */
6006IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
6007{
6008 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6009
6010 /*
6011 * CPL check
6012 */
6013 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6014 if (rcStrict != VINF_SUCCESS)
6015 return rcStrict;
6016
6017 /*
6018 * Check SVM nested-guest IO intercept.
6019 */
6020 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6021 {
6022 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, 0 /* N/A - cAddrSizeBits */,
6023 0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
6024 if (rcStrict == VINF_SVM_VMEXIT)
6025 return VINF_SUCCESS;
6026 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6027 {
6028 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6029 VBOXSTRICTRC_VAL(rcStrict)));
6030 return rcStrict;
6031 }
6032 }
6033
6034 /*
6035 * Perform the I/O.
6036 */
6037 uint32_t u32Value;
6038 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6039 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg);
6040 else
6041 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, cbReg);
6042 if (IOM_SUCCESS(rcStrict))
6043 {
6044 switch (cbReg)
6045 {
6046 case 1: pCtx->al = (uint8_t)u32Value; break;
6047 case 2: pCtx->ax = (uint16_t)u32Value; break;
6048 case 4: pCtx->rax = u32Value; break;
6049 default: AssertFailedReturn(VERR_IEM_IPE_3);
6050 }
6051 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6052 pVCpu->iem.s.cPotentialExits++;
6053 if (rcStrict != VINF_SUCCESS)
6054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6055 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6056
6057 /*
6058 * Check for I/O breakpoints.
6059 */
6060 uint32_t const uDr7 = pCtx->dr[7];
6061 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6062 && X86_DR7_ANY_RW_IO(uDr7)
6063 && (pCtx->cr4 & X86_CR4_DE))
6064 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6065 {
6066 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6067 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6068 rcStrict = iemRaiseDebugException(pVCpu);
6069 }
6070 }
6071
6072 return rcStrict;
6073}
6074
6075
6076/**
6077 * Implements 'IN eAX, DX'.
6078 *
6079 * @param cbReg The register size.
6080 */
6081IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
6082{
6083 return IEM_CIMPL_CALL_2(iemCImpl_in, IEM_GET_CTX(pVCpu)->dx, cbReg);
6084}
6085
6086
6087/**
6088 * Implements 'OUT port, eAX'.
6089 *
6090 * @param u16Port The destination port.
6091 * @param cbReg The register size.
6092 */
6093IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
6094{
6095 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6096
6097 /*
6098 * CPL check
6099 */
6100 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6101 if (rcStrict != VINF_SUCCESS)
6102 return rcStrict;
6103
6104 /*
6105 * Check SVM nested-guest IO intercept.
6106 */
6107 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6108 {
6109 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, 0 /* N/A - cAddrSizeBits */,
6110 0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr);
6111 if (rcStrict == VINF_SVM_VMEXIT)
6112 return VINF_SUCCESS;
6113 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6114 {
6115 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6116 VBOXSTRICTRC_VAL(rcStrict)));
6117 return rcStrict;
6118 }
6119 }
6120
6121 /*
6122 * Perform the I/O.
6123 */
6124 uint32_t u32Value;
6125 switch (cbReg)
6126 {
6127 case 1: u32Value = pCtx->al; break;
6128 case 2: u32Value = pCtx->ax; break;
6129 case 4: u32Value = pCtx->eax; break;
6130 default: AssertFailedReturn(VERR_IEM_IPE_4);
6131 }
6132 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6133 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg);
6134 else
6135 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, u32Value, cbReg);
6136 if (IOM_SUCCESS(rcStrict))
6137 {
6138 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6139 pVCpu->iem.s.cPotentialExits++;
6140 if (rcStrict != VINF_SUCCESS)
6141 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6142 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6143
6144 /*
6145 * Check for I/O breakpoints.
6146 */
6147 uint32_t const uDr7 = pCtx->dr[7];
6148 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6149 && X86_DR7_ANY_RW_IO(uDr7)
6150 && (pCtx->cr4 & X86_CR4_DE))
6151 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6152 {
6153 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6154 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6155 rcStrict = iemRaiseDebugException(pVCpu);
6156 }
6157 }
6158 return rcStrict;
6159}
6160
6161
6162/**
6163 * Implements 'OUT DX, eAX'.
6164 *
6165 * @param cbReg The register size.
6166 */
6167IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
6168{
6169 return IEM_CIMPL_CALL_2(iemCImpl_out, IEM_GET_CTX(pVCpu)->dx, cbReg);
6170}
6171
6172
6173#ifdef VBOX_WITH_NESTED_HWVIRT
6174/**
6175 * Implements 'VMRUN'.
6176 */
6177IEM_CIMPL_DEF_0(iemCImpl_vmrun)
6178{
6179 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6180 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
6181
6182 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6183 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
6184 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
6185 {
6186 Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
6187 return iemRaiseGeneralProtectionFault0(pVCpu);
6188 }
6189
6190 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
6191 {
6192 Log(("vmrun: Guest intercept -> #VMEXIT\n"));
6193 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6194 }
6195
6196 VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
6197 /* If VMRUN execution causes a #VMEXIT, we continue executing the instruction following the VMRUN. */
6198 if (rcStrict == VINF_SVM_VMEXIT)
6199 {
6200 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6201 rcStrict = VINF_SUCCESS;
6202 }
6203 else if (rcStrict == VERR_SVM_VMEXIT_FAILED)
6204 rcStrict = iemInitiateCpuShutdown(pVCpu);
6205 return rcStrict;
6206}
6207
6208
6209/**
6210 * Implements 'VMMCALL'.
6211 */
6212IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
6213{
6214 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6215 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
6216 {
6217 Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
6218 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6219 }
6220
6221 bool fUpdatedRipAndRF;
6222 VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
6223 if (RT_SUCCESS(rcStrict))
6224 {
6225 if (!fUpdatedRipAndRF)
6226 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6227 return rcStrict;
6228 }
6229
6230 return iemRaiseUndefinedOpcode(pVCpu);
6231}
6232
6233
6234/**
6235 * Implements 'VMLOAD'.
6236 */
6237IEM_CIMPL_DEF_0(iemCImpl_vmload)
6238{
6239 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6240 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
6241
6242 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6243 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
6244 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
6245 {
6246 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
6247 return iemRaiseGeneralProtectionFault0(pVCpu);
6248 }
6249
6250 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
6251 {
6252 Log(("vmload: Guest intercept -> #VMEXIT\n"));
6253 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6254 }
6255
6256 void *pvVmcb;
6257 PGMPAGEMAPLOCK PgLockVmcb;
6258 VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, &pvVmcb, &PgLockVmcb);
6259 if (rcStrict == VINF_SUCCESS)
6260 {
6261 PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
6262 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, FS, fs);
6263 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, GS, gs);
6264 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, TR, tr);
6265 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
6266
6267 pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
6268 pCtx->msrSTAR = pVmcb->guest.u64STAR;
6269 pCtx->msrLSTAR = pVmcb->guest.u64LSTAR;
6270 pCtx->msrCSTAR = pVmcb->guest.u64CSTAR;
6271 pCtx->msrSFMASK = pVmcb->guest.u64SFMASK;
6272
6273 pCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
6274 pCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
6275 pCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
6276
6277 iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
6278 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6279 }
6280 return rcStrict;
6281}
6282
6283
6284/**
6285 * Implements 'VMSAVE'.
6286 */
6287IEM_CIMPL_DEF_0(iemCImpl_vmsave)
6288{
6289 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6290 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
6291
6292 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6293 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
6294 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
6295 {
6296 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
6297 return iemRaiseGeneralProtectionFault0(pVCpu);
6298 }
6299
6300 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
6301 {
6302 Log(("vmsave: Guest intercept -> #VMEXIT\n"));
6303 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6304 }
6305
6306 void *pvVmcb;
6307 PGMPAGEMAPLOCK PgLockVmcb;
6308 VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
6309 if (rcStrict == VINF_SUCCESS)
6310 {
6311 PSVMVMCB pVmcb = (PSVMVMCB)pvVmcb;
6312 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
6313 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
6314 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
6315 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
6316
6317 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
6318 pVmcb->guest.u64STAR = pCtx->msrSTAR;
6319 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
6320 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
6321 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
6322
6323 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
6324 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
6325 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
6326
6327 iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
6328 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6329 }
6330 return rcStrict;
6331}
6332
6333
6334/**
6335 * Implements 'CLGI'.
6336 */
6337IEM_CIMPL_DEF_0(iemCImpl_clgi)
6338{
6339 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6340 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
6341 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
6342 {
6343 Log(("clgi: Guest intercept -> #VMEXIT\n"));
6344 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6345 }
6346
6347 pCtx->hwvirt.svm.fGif = 0;
6348 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6349 return VINF_SUCCESS;
6350}
6351
6352
6353/**
6354 * Implements 'STGI'.
6355 */
6356IEM_CIMPL_DEF_0(iemCImpl_stgi)
6357{
6358 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6359 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
6360 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
6361 {
6362 Log2(("stgi: Guest intercept -> #VMEXIT\n"));
6363 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6364 }
6365
6366 pCtx->hwvirt.svm.fGif = 1;
6367 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6368 return VINF_SUCCESS;
6369}
6370
6371
6372/**
6373 * Implements 'INVLPGA'.
6374 */
6375IEM_CIMPL_DEF_0(iemCImpl_invlpga)
6376{
6377 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6378 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6379 /** @todo PGM needs virtual ASID support. */
6380#if 0
6381 uint32_t const uAsid = pCtx->ecx;
6382#endif
6383
6384 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
6385 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
6386 {
6387 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6388 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6389 }
6390
6391 PGMInvalidatePage(pVCpu, GCPtrPage);
6392 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6393 return VINF_SUCCESS;
6394}
6395
6396
6397/**
6398 * Implements 'SKINIT'.
6399 */
6400IEM_CIMPL_DEF_0(iemCImpl_skinit)
6401{
6402 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
6403
6404 uint32_t uIgnore;
6405 uint32_t fFeaturesECX;
6406 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
6407 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
6408 return iemRaiseUndefinedOpcode(pVCpu);
6409
6410 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
6411 {
6412 Log2(("skinit: Guest intercept -> #VMEXIT\n"));
6413 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6414 }
6415
6416 RT_NOREF(cbInstr);
6417 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
6418}
6419#endif /* VBOX_WITH_NESTED_HWVIRT */
6420
6421/**
6422 * Implements 'CLI'.
6423 */
6424IEM_CIMPL_DEF_0(iemCImpl_cli)
6425{
6426 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6427 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6428 uint32_t const fEflOld = fEfl;
6429 if (pCtx->cr0 & X86_CR0_PE)
6430 {
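        /* Protected mode: CPL <= IOPL may clear IF; CPL 3 with CR4.PVI set clears VIF instead; anything else is #GP(0). */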
6431 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6432 if (!(fEfl & X86_EFL_VM))
6433 {
6434 if (pVCpu->iem.s.uCpl <= uIopl)
6435 fEfl &= ~X86_EFL_IF;
6436 else if ( pVCpu->iem.s.uCpl == 3
6437 && (pCtx->cr4 & X86_CR4_PVI) )
6438 fEfl &= ~X86_EFL_VIF;
6439 else
6440 return iemRaiseGeneralProtectionFault0(pVCpu);
6441 }
6442 /* V8086 */
6443 else if (uIopl == 3)
6444 fEfl &= ~X86_EFL_IF;
6445 else if ( uIopl < 3
6446 && (pCtx->cr4 & X86_CR4_VME) )
6447 fEfl &= ~X86_EFL_VIF;
6448 else
6449 return iemRaiseGeneralProtectionFault0(pVCpu);
6450 }
6451 /* real mode */
6452 else
6453 fEfl &= ~X86_EFL_IF;
6454
6455 /* Commit. */
6456 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6457 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6458 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
6459 return VINF_SUCCESS;
6460}
6461
6462
6463/**
6464 * Implements 'STI'.
6465 */
6466IEM_CIMPL_DEF_0(iemCImpl_sti)
6467{
6468 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6469 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6470 uint32_t const fEflOld = fEfl;
6471
6472 if (pCtx->cr0 & X86_CR0_PE)
6473 {
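        /* Protected mode: CPL <= IOPL may set IF; CPL 3 with CR4.PVI set and VIP clear sets VIF instead; anything else is #GP(0). */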
6474 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6475 if (!(fEfl & X86_EFL_VM))
6476 {
6477 if (pVCpu->iem.s.uCpl <= uIopl)
6478 fEfl |= X86_EFL_IF;
6479 else if ( pVCpu->iem.s.uCpl == 3
6480 && (pCtx->cr4 & X86_CR4_PVI)
6481 && !(fEfl & X86_EFL_VIP) )
6482 fEfl |= X86_EFL_VIF;
6483 else
6484 return iemRaiseGeneralProtectionFault0(pVCpu);
6485 }
6486 /* V8086 */
6487 else if (uIopl == 3)
6488 fEfl |= X86_EFL_IF;
6489 else if ( uIopl < 3
6490 && (pCtx->cr4 & X86_CR4_VME)
6491 && !(fEfl & X86_EFL_VIP) )
6492 fEfl |= X86_EFL_VIF;
6493 else
6494 return iemRaiseGeneralProtectionFault0(pVCpu);
6495 }
6496 /* real mode */
6497 else
6498 fEfl |= X86_EFL_IF;
6499
6500 /* Commit. */
6501 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6502 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
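    /* An STI that actually sets IF starts a one-instruction interrupt inhibition shadow;
       in full REM verification mode the shadow is always recorded. */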
6503 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
6504 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6505 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
6506 return VINF_SUCCESS;
6507}
6508
6509
6510/**
6511 * Implements 'HLT'.
6512 */
6513IEM_CIMPL_DEF_0(iemCImpl_hlt)
6514{
6515 if (pVCpu->iem.s.uCpl != 0)
6516 return iemRaiseGeneralProtectionFault0(pVCpu);
6517
6518 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
6519 {
6520 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
6521 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6522 }
6523
6524 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6525 return VINF_EM_HALT;
6526}
6527
6528
6529/**
6530 * Implements 'MONITOR'.
6531 */
6532IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
6533{
6534 /*
6535 * Permission checks.
6536 */
6537 if (pVCpu->iem.s.uCpl != 0)
6538 {
6539 Log2(("monitor: CPL != 0\n"));
6540 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
6541 }
6542 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6543 {
6544 Log2(("monitor: Not in CPUID\n"));
6545 return iemRaiseUndefinedOpcode(pVCpu);
6546 }
6547
6548 /*
6549 * Gather the operands and validate them.
6550 */
6551 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6552 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6553 uint32_t uEcx = pCtx->ecx;
6554 uint32_t uEdx = pCtx->edx;
6555/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
6556 * \#GP first. */
6557 if (uEcx != 0)
6558 {
6559 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
6560 return iemRaiseGeneralProtectionFault0(pVCpu);
6561 }
6562
6563 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
6564 if (rcStrict != VINF_SUCCESS)
6565 return rcStrict;
6566
6567 RTGCPHYS GCPhysMem;
6568 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
6569 if (rcStrict != VINF_SUCCESS)
6570 return rcStrict;
6571
6572 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
6573 {
6574 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
6575 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6576 }
6577
6578 /*
6579 * Call EM to prepare the monitor/wait.
6580 */
6581 rcStrict = EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
6582 Assert(rcStrict == VINF_SUCCESS);
6583
6584 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6585 return rcStrict;
6586}
6587
6588
6589/**
6590 * Implements 'MWAIT'.
6591 */
6592IEM_CIMPL_DEF_0(iemCImpl_mwait)
6593{
6594 /*
6595 * Permission checks.
6596 */
6597 if (pVCpu->iem.s.uCpl != 0)
6598 {
6599 Log2(("mwait: CPL != 0\n"));
6600 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
6601 * EFLAGS.VM then.) */
6602 return iemRaiseUndefinedOpcode(pVCpu);
6603 }
6604 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6605 {
6606 Log2(("mwait: Not in CPUID\n"));
6607 return iemRaiseUndefinedOpcode(pVCpu);
6608 }
6609
6610 /*
6611 * Gather the operands and validate them.
6612 */
6613 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6614 uint32_t uEax = pCtx->eax;
6615 uint32_t uEcx = pCtx->ecx;
6616 if (uEcx != 0)
6617 {
6618 /* Only supported extension is break on IRQ when IF=0. */
6619 if (uEcx > 1)
6620 {
6621 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
6622 return iemRaiseGeneralProtectionFault0(pVCpu);
6623 }
6624 uint32_t fMWaitFeatures = 0;
6625 uint32_t uIgnore = 0;
6626 CPUMGetGuestCpuId(pVCpu, 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
6627 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6628 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6629 {
6630 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
6631 return iemRaiseGeneralProtectionFault0(pVCpu);
6632 }
6633 }
6634
6635 /*
6636 * Check SVM nested-guest mwait intercepts.
6637 */
6638 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
6639 && EMMonitorIsArmed(pVCpu))
6640 {
6641 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
6642 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6643 }
6644 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
6645 {
6646 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
6647 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6648 }
6649
6650 /*
6651 * Call EM to prepare the monitor/wait.
6652 */
6653 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
6654
6655 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6656 return rcStrict;
6657}
6658
6659
6660/**
6661 * Implements 'SWAPGS'.
6662 */
6663IEM_CIMPL_DEF_0(iemCImpl_swapgs)
6664{
6665 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
6666
6667 /*
6668 * Permission checks.
6669 */
6670 if (pVCpu->iem.s.uCpl != 0)
6671 {
6672 Log2(("swapgs: CPL != 0\n"));
6673 return iemRaiseUndefinedOpcode(pVCpu);
6674 }
6675
6676 /*
6677 * Do the job.
6678 */
6679 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6680 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
6681 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
6682 pCtx->gs.u64Base = uOtherGsBase;
6683
6684 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6685 return VINF_SUCCESS;
6686}
6687
6688
6689/**
6690 * Implements 'CPUID'.
6691 */
6692IEM_CIMPL_DEF_0(iemCImpl_cpuid)
6693{
6694 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6695
6696 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
6697 {
6698 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
6699 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6700 }
6701
6702 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
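    /* CPUID clears the upper 32 bits of RAX, RBX, RCX and RDX in 64-bit mode; mask them explicitly here. */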
6703 pCtx->rax &= UINT32_C(0xffffffff);
6704 pCtx->rbx &= UINT32_C(0xffffffff);
6705 pCtx->rcx &= UINT32_C(0xffffffff);
6706 pCtx->rdx &= UINT32_C(0xffffffff);
6707
6708 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6709 return VINF_SUCCESS;
6710}
6711
6712
6713/**
6714 * Implements 'AAD'.
6715 *
6716 * @param bImm The immediate operand.
6717 */
6718IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
6719{
6720 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6721
6722 uint16_t const ax = pCtx->ax;
6723 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
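    /* AL = AL + AH * imm; the 16-bit assignment of the 8-bit result also clears AH as required. */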
6724 pCtx->ax = al;
6725 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6726 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6727 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6728
6729 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6730 return VINF_SUCCESS;
6731}
6732
6733
6734/**
6735 * Implements 'AAM'.
6736 *
6737 * @param bImm The immediate operand. Cannot be 0.
6738 */
6739IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
6740{
6741 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6742 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
6743
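    /* AAM splits AL into unpacked BCD: AH = AL / imm, AL = AL % imm
       (e.g. AL=37 with the default base of 10 yields AH=3, AL=7). */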
6744 uint16_t const ax = pCtx->ax;
6745 uint8_t const al = (uint8_t)ax % bImm;
6746 uint8_t const ah = (uint8_t)ax / bImm;
6747 pCtx->ax = (ah << 8) + al;
6748 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6749 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6750 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6751
6752 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6753 return VINF_SUCCESS;
6754}
6755
6756
6757/**
6758 * Implements 'DAA'.
6759 */
6760IEM_CIMPL_DEF_0(iemCImpl_daa)
6761{
6762 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6763
6764 uint8_t const al = pCtx->al;
6765 bool const fCarry = pCtx->eflags.Bits.u1CF;
6766
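    /* Decimal adjust after addition: add 6 if the low nibble exceeds 9 or AF is set, and
       add 0x60 if the original AL exceeded 0x99 or CF was set. */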
6767 if ( pCtx->eflags.Bits.u1AF
6768 || (al & 0xf) >= 10)
6769 {
6770 pCtx->al = al + 6;
6771 pCtx->eflags.Bits.u1AF = 1;
6772 }
6773 else
6774 pCtx->eflags.Bits.u1AF = 0;
6775
6776 if (al >= 0x9a || fCarry)
6777 {
6778 pCtx->al += 0x60;
6779 pCtx->eflags.Bits.u1CF = 1;
6780 }
6781 else
6782 pCtx->eflags.Bits.u1CF = 0;
6783
6784 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6785 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6786 return VINF_SUCCESS;
6787}
6788
6789
6790/**
6791 * Implements 'DAS'.
6792 */
6793IEM_CIMPL_DEF_0(iemCImpl_das)
6794{
6795 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6796
6797 uint8_t const uInputAL = pCtx->al;
6798 bool const fCarry = pCtx->eflags.Bits.u1CF;
6799
6800 if ( pCtx->eflags.Bits.u1AF
6801 || (uInputAL & 0xf) >= 10)
6802 {
6803 pCtx->eflags.Bits.u1AF = 1;
6804 if (uInputAL < 6)
6805 pCtx->eflags.Bits.u1CF = 1;
6806 pCtx->al = uInputAL - 6;
6807 }
6808 else
6809 {
6810 pCtx->eflags.Bits.u1AF = 0;
6811 pCtx->eflags.Bits.u1CF = 0;
6812 }
6813
6814 if (uInputAL >= 0x9a || fCarry)
6815 {
6816 pCtx->al -= 0x60;
6817 pCtx->eflags.Bits.u1CF = 1;
6818 }
6819
6820 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6821 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6822 return VINF_SUCCESS;
6823}
6824
6825
6826/**
6827 * Implements 'AAA'.
6828 */
6829IEM_CIMPL_DEF_0(iemCImpl_aaa)
6830{
6831 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6832
6833 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6834 {
6835 if ( pCtx->eflags.Bits.u1AF
6836 || (pCtx->ax & 0xf) >= 10)
6837 {
6838 iemAImpl_add_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6839 pCtx->eflags.Bits.u1AF = 1;
6840 pCtx->eflags.Bits.u1CF = 1;
6841#ifdef IEM_VERIFICATION_MODE_FULL
6842 pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6843#endif
6844 }
6845 else
6846 {
6847 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6848 pCtx->eflags.Bits.u1AF = 0;
6849 pCtx->eflags.Bits.u1CF = 0;
6850 }
6851 pCtx->ax &= UINT16_C(0xff0f);
6852 }
6853 else
6854 {
6855 if ( pCtx->eflags.Bits.u1AF
6856 || (pCtx->ax & 0xf) >= 10)
6857 {
6858 pCtx->ax += UINT16_C(0x106);
6859 pCtx->eflags.Bits.u1AF = 1;
6860 pCtx->eflags.Bits.u1CF = 1;
6861 }
6862 else
6863 {
6864 pCtx->eflags.Bits.u1AF = 0;
6865 pCtx->eflags.Bits.u1CF = 0;
6866 }
6867 pCtx->ax &= UINT16_C(0xff0f);
6868 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6869 }
6870
6871 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6872 return VINF_SUCCESS;
6873}
6874
6875
6876/**
6877 * Implements 'AAS'.
6878 */
6879IEM_CIMPL_DEF_0(iemCImpl_aas)
6880{
6881 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6882
6883 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6884 {
6885 if ( pCtx->eflags.Bits.u1AF
6886 || (pCtx->ax & 0xf) >= 10)
6887 {
6888 iemAImpl_sub_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6889 pCtx->eflags.Bits.u1AF = 1;
6890 pCtx->eflags.Bits.u1CF = 1;
6891#ifdef IEM_VERIFICATION_MODE_FULL
6892 pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6893#endif
6894 }
6895 else
6896 {
6897 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6898 pCtx->eflags.Bits.u1AF = 0;
6899 pCtx->eflags.Bits.u1CF = 0;
6900 }
6901 pCtx->ax &= UINT16_C(0xff0f);
6902 }
6903 else
6904 {
6905 if ( pCtx->eflags.Bits.u1AF
6906 || (pCtx->ax & 0xf) >= 10)
6907 {
6908 pCtx->ax -= UINT16_C(0x106);
6909 pCtx->eflags.Bits.u1AF = 1;
6910 pCtx->eflags.Bits.u1CF = 1;
6911 }
6912 else
6913 {
6914 pCtx->eflags.Bits.u1AF = 0;
6915 pCtx->eflags.Bits.u1CF = 0;
6916 }
6917 pCtx->ax &= UINT16_C(0xff0f);
6918 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6919 }
6920
6921 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6922 return VINF_SUCCESS;
6923}
6924
6925
6926/**
6927 * Implements the 16-bit version of 'BOUND'.
6928 *
6929 * @note We have separate 16-bit and 32-bit variants of this function due to
6930 * the decoder using unsigned parameters, whereas we want signed ones to
6931 * do the job. This is significant for a recompiler.
6932 */
6933IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
6934{
6935 /*
6936 * Check if the index is inside the bounds, otherwise raise #BR.
6937 */
6938 if ( idxArray >= idxLowerBound
6939 && idxArray <= idxUpperBound)
6940 {
6941 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6942 return VINF_SUCCESS;
6943 }
6944
6945 return iemRaiseBoundRangeExceeded(pVCpu);
6946}
6947
6948
6949/**
6950 * Implements the 32-bit version of 'BOUND'.
6951 */
6952IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
6953{
6954 /*
6955 * Check if the index is inside the bounds, otherwise raise #BR.
6956 */
6957 if ( idxArray >= idxLowerBound
6958 && idxArray <= idxUpperBound)
6959 {
6960 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6961 return VINF_SUCCESS;
6962 }
6963
6964 return iemRaiseBoundRangeExceeded(pVCpu);
6965}
6966
6967
6968
6969/*
6970 * Instantiate the various string operation combinations.
6971 */
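/* Note: there is deliberately no OP_SIZE 64 / ADDR_SIZE 16 combination below;
   64-bit operand size is only available in long mode, where a 16-bit address
   size cannot be encoded. */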
6972#define OP_SIZE 8
6973#define ADDR_SIZE 16
6974#include "IEMAllCImplStrInstr.cpp.h"
6975#define OP_SIZE 8
6976#define ADDR_SIZE 32
6977#include "IEMAllCImplStrInstr.cpp.h"
6978#define OP_SIZE 8
6979#define ADDR_SIZE 64
6980#include "IEMAllCImplStrInstr.cpp.h"
6981
6982#define OP_SIZE 16
6983#define ADDR_SIZE 16
6984#include "IEMAllCImplStrInstr.cpp.h"
6985#define OP_SIZE 16
6986#define ADDR_SIZE 32
6987#include "IEMAllCImplStrInstr.cpp.h"
6988#define OP_SIZE 16
6989#define ADDR_SIZE 64
6990#include "IEMAllCImplStrInstr.cpp.h"
6991
6992#define OP_SIZE 32
6993#define ADDR_SIZE 16
6994#include "IEMAllCImplStrInstr.cpp.h"
6995#define OP_SIZE 32
6996#define ADDR_SIZE 32
6997#include "IEMAllCImplStrInstr.cpp.h"
6998#define OP_SIZE 32
6999#define ADDR_SIZE 64
7000#include "IEMAllCImplStrInstr.cpp.h"
7001
7002#define OP_SIZE 64
7003#define ADDR_SIZE 32
7004#include "IEMAllCImplStrInstr.cpp.h"
7005#define OP_SIZE 64
7006#define ADDR_SIZE 64
7007#include "IEMAllCImplStrInstr.cpp.h"
7008
7009
7010/**
7011 * Implements 'XGETBV'.
7012 */
7013IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
7014{
7015 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7016 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7017 {
7018 uint32_t uEcx = pCtx->ecx;
7019 switch (uEcx)
7020 {
7021 case 0:
7022 break;
7023
7024 case 1: /** @todo Implement XCR1 support. */
7025 default:
7026 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
7027 return iemRaiseGeneralProtectionFault0(pVCpu);
7028
7029 }
7030 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
7031 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
7032
7033 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7034 return VINF_SUCCESS;
7035 }
7036 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
7037 return iemRaiseUndefinedOpcode(pVCpu);
7038}
7039
7040
7041/**
7042 * Implements 'XSETBV'.
7043 */
7044IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
7045{
7046 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7047 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7048 {
7049 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
7050 {
7051 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
7052 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7053 }
7054
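    /* XSETBV is a CPL-0 only instruction; it loads the XCR selected by ECX
       with the value in EDX:EAX. */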
7055 if (pVCpu->iem.s.uCpl == 0)
7056 {
7057 uint32_t uEcx = pCtx->ecx;
7058 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
7059 switch (uEcx)
7060 {
7061 case 0:
7062 {
7063 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
7064 if (rc == VINF_SUCCESS)
7065 break;
7066 Assert(rc == VERR_CPUM_RAISE_GP_0);
7067 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7068 return iemRaiseGeneralProtectionFault0(pVCpu);
7069 }
7070
7071 case 1: /** @todo Implement XCR1 support. */
7072 default:
7073 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7074 return iemRaiseGeneralProtectionFault0(pVCpu);
7075
7076 }
7077
7078 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7079 return VINF_SUCCESS;
7080 }
7081
7082 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
7083 return iemRaiseGeneralProtectionFault0(pVCpu);
7084 }
7085 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
7086 return iemRaiseUndefinedOpcode(pVCpu);
7087}
7088
7089#ifdef IN_RING3
7090
7091/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
7092struct IEMCIMPLCX16ARGS
7093{
7094 PRTUINT128U pu128Dst;
7095 PRTUINT128U pu128RaxRdx;
7096 PRTUINT128U pu128RbxRcx;
7097 uint32_t *pEFlags;
7098# ifdef VBOX_STRICT
7099 uint32_t cCalls;
7100# endif
7101};
7102
7103/**
7104 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
7105 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
7106 */
7107static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPU pVCpu, void *pvUser)
7108{
7109 RT_NOREF(pVM, pVCpu);
7110 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
7111# ifdef VBOX_STRICT
7112 Assert(pArgs->cCalls == 0);
7113 pArgs->cCalls++;
7114# endif
7115
7116 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
7117 return VINF_SUCCESS;
7118}
7119
7120#endif /* IN_RING3 */
7121
7122/**
7123 * Implements 'CMPXCHG16B' fallback using rendezvous.
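 *
 * The fallback worker itself is not atomic, so it is run from an EMT
 * rendezvous (ONCE type): a single EMT executes the callback while the other
 * EMTs are held in the rendezvous, so the guest cannot observe a torn
 * compare-exchange.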
7124 */
7125IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
7126 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
7127{
7128#ifdef IN_RING3
7129 struct IEMCIMPLCX16ARGS Args;
7130 Args.pu128Dst = pu128Dst;
7131 Args.pu128RaxRdx = pu128RaxRdx;
7132 Args.pu128RbxRcx = pu128RbxRcx;
7133 Args.pEFlags = pEFlags;
7134# ifdef VBOX_STRICT
7135 Args.cCalls = 0;
7136# endif
7137 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
7138 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
7139 Assert(Args.cCalls == 1);
7140 if (rcStrict == VINF_SUCCESS)
7141 {
7142 /* Duplicated tail code. */
7143 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
7144 if (rcStrict == VINF_SUCCESS)
7145 {
7146 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7147 pCtx->eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
7148 if (!(*pEFlags & X86_EFL_ZF))
7149 {
7150 pCtx->rax = pu128RaxRdx->s.Lo;
7151 pCtx->rdx = pu128RaxRdx->s.Hi;
7152 }
7153 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7154 }
7155 }
7156 return rcStrict;
7157#else
7158 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
7159 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
7160#endif
7161}
7162
7163
7164/**
7165 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
7166 *
7167 * This is implemented in C because it triggers a load-like behaviour without
7168 * actually reading anything. Since that's not so common, it's implemented
7169 * here.
7170 *
7171 * @param iEffSeg The effective segment.
7172 * @param GCPtrEff The address of the image.
7173 */
7174IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7175{
7176 /*
7177 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
7178 */
7179 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
7180 if (rcStrict == VINF_SUCCESS)
7181 {
7182 RTGCPHYS GCPhysMem;
7183 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7184 if (rcStrict == VINF_SUCCESS)
7185 {
7186 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7187 return VINF_SUCCESS;
7188 }
7189 }
7190
7191 return rcStrict;
7192}
7193
7194
7195/**
7196 * Implements 'FINIT' and 'FNINIT'.
7197 *
7198 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
7199 * not.
7200 */
7201IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
7202{
7203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7204
7205 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
7206 return iemRaiseDeviceNotAvailable(pVCpu);
7207
7208 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
7209 if (fCheckXcpts && TODO )
7210 return iemRaiseMathFault(pVCpu);
7211 */
7212
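    /* These are the architectural FNINIT defaults: FCW=0x37F (all exceptions
       masked, 64-bit precision, round to nearest), FSW cleared, all registers
       tagged empty, and the instruction/data pointers zeroed. */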
7213 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
7214 pXState->x87.FCW = 0x37f;
7215 pXState->x87.FSW = 0;
7216 pXState->x87.FTW = 0x00; /* 0 - empty. */
7217 pXState->x87.FPUDP = 0;
7218 pXState->x87.DS = 0; //??
7219 pXState->x87.Rsrvd2= 0;
7220 pXState->x87.FPUIP = 0;
7221 pXState->x87.CS = 0; //??
7222 pXState->x87.Rsrvd1= 0;
7223 pXState->x87.FOP = 0;
7224
7225 iemHlpUsedFpu(pVCpu);
7226 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7227 return VINF_SUCCESS;
7228}
7229
7230
7231/**
7232 * Implements 'FXSAVE'.
7233 *
7234 * @param iEffSeg The effective segment.
7235 * @param GCPtrEff The address of the image.
7236 * @param enmEffOpSize The operand size (only REX.W really matters).
7237 */
7238IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7239{
7240 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7241
7242 /*
7243 * Raise exceptions.
7244 */
7245 if (pCtx->cr0 & X86_CR0_EM)
7246 return iemRaiseUndefinedOpcode(pVCpu);
7247 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7248 return iemRaiseDeviceNotAvailable(pVCpu);
7249 if (GCPtrEff & 15)
7250 {
7251 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7252 * all/any misalignment sizes; Intel says it's an implementation detail. */
7253 if ( (pCtx->cr0 & X86_CR0_AM)
7254 && pCtx->eflags.Bits.u1AC
7255 && pVCpu->iem.s.uCpl == 3)
7256 return iemRaiseAlignmentCheckException(pVCpu);
7257 return iemRaiseGeneralProtectionFault0(pVCpu);
7258 }
7259
7260 /*
7261 * Access the memory.
7262 */
7263 void *pvMem512;
7264 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7265 if (rcStrict != VINF_SUCCESS)
7266 return rcStrict;
7267 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7268 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7269
7270 /*
7271 * Store the registers.
7272 */
7273 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
7274 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
7275
7276 /* common for all formats */
7277 pDst->FCW = pSrc->FCW;
7278 pDst->FSW = pSrc->FSW;
7279 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7280 pDst->FOP = pSrc->FOP;
7281 pDst->MXCSR = pSrc->MXCSR;
7282 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7283 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7284 {
7285 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7286 * them for now... */
7287 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7288 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7289 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7290 pDst->aRegs[i].au32[3] = 0;
7291 }
7292
7293 /* FPU IP, CS, DP and DS. */
7294 pDst->FPUIP = pSrc->FPUIP;
7295 pDst->CS = pSrc->CS;
7296 pDst->FPUDP = pSrc->FPUDP;
7297 pDst->DS = pSrc->DS;
7298 if (enmEffOpSize == IEMMODE_64BIT)
7299 {
7300 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7301 pDst->Rsrvd1 = pSrc->Rsrvd1;
7302 pDst->Rsrvd2 = pSrc->Rsrvd2;
7303 pDst->au32RsrvdForSoftware[0] = 0;
7304 }
7305 else
7306 {
7307 pDst->Rsrvd1 = 0;
7308 pDst->Rsrvd2 = 0;
7309 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7310 }
7311
7312 /* XMM registers. */
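    /* With EFER.FFXSR set, FXSAVE executed by 64-bit code at CPL 0 skips the
       XMM registers (fast save); in every other case they are stored here. */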
7313 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7314 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7315 || pVCpu->iem.s.uCpl != 0)
7316 {
7317 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7318 for (uint32_t i = 0; i < cXmmRegs; i++)
7319 pDst->aXMM[i] = pSrc->aXMM[i];
7320 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7321 * right? */
7322 }
7323
7324 /*
7325 * Commit the memory.
7326 */
7327 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7328 if (rcStrict != VINF_SUCCESS)
7329 return rcStrict;
7330
7331 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7332 return VINF_SUCCESS;
7333}
7334
7335
7336/**
7337 * Implements 'FXRSTOR'.
7338 *
7339 * @param iEffSeg The effective segment.
     * @param GCPtrEff The address of the image.
7340 * @param enmEffOpSize The operand size (only REX.W really matters).
7341 */
7342IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7343{
7344 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7345
7346 /*
7347 * Raise exceptions.
7348 */
7349 if (pCtx->cr0 & X86_CR0_EM)
7350 return iemRaiseUndefinedOpcode(pVCpu);
7351 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7352 return iemRaiseDeviceNotAvailable(pVCpu);
7353 if (GCPtrEff & 15)
7354 {
7355 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7356 * all/any misalignment sizes; Intel says it's an implementation detail. */
7357 if ( (pCtx->cr0 & X86_CR0_AM)
7358 && pCtx->eflags.Bits.u1AC
7359 && pVCpu->iem.s.uCpl == 3)
7360 return iemRaiseAlignmentCheckException(pVCpu);
7361 return iemRaiseGeneralProtectionFault0(pVCpu);
7362 }
7363
7364 /*
7365 * Access the memory.
7366 */
7367 void *pvMem512;
7368 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7369 if (rcStrict != VINF_SUCCESS)
7370 return rcStrict;
7371 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7372 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7373
7374 /*
7375 * Check the state for stuff which will #GP(0).
7376 */
7377 uint32_t const fMXCSR = pSrc->MXCSR;
7378 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7379 if (fMXCSR & ~fMXCSR_MASK)
7380 {
7381 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
7382 return iemRaiseGeneralProtectionFault0(pVCpu);
7383 }
7384
7385 /*
7386 * Load the registers.
7387 */
7388 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
7389 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
7390
7391 /* common for all formats */
7392 pDst->FCW = pSrc->FCW;
7393 pDst->FSW = pSrc->FSW;
7394 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7395 pDst->FOP = pSrc->FOP;
7396 pDst->MXCSR = fMXCSR;
7397 /* (MXCSR_MASK is read-only) */
7398 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7399 {
7400 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7401 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7402 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7403 pDst->aRegs[i].au32[3] = 0;
7404 }
7405
7406 /* FPU IP, CS, DP and DS. */
7407 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7408 {
7409 pDst->FPUIP = pSrc->FPUIP;
7410 pDst->CS = pSrc->CS;
7411 pDst->Rsrvd1 = pSrc->Rsrvd1;
7412 pDst->FPUDP = pSrc->FPUDP;
7413 pDst->DS = pSrc->DS;
7414 pDst->Rsrvd2 = pSrc->Rsrvd2;
7415 }
7416 else
7417 {
7418 pDst->FPUIP = pSrc->FPUIP;
7419 pDst->CS = pSrc->CS;
7420 pDst->Rsrvd1 = 0;
7421 pDst->FPUDP = pSrc->FPUDP;
7422 pDst->DS = pSrc->DS;
7423 pDst->Rsrvd2 = 0;
7424 }
7425
7426 /* XMM registers. */
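    /* Same EFER.FFXSR rule as for FXSAVE: the fast 64-bit CPL-0 variant skips
       restoring the XMM registers. */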
7427 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7428 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7429 || pVCpu->iem.s.uCpl != 0)
7430 {
7431 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7432 for (uint32_t i = 0; i < cXmmRegs; i++)
7433 pDst->aXMM[i] = pSrc->aXMM[i];
7434 }
7435
7436 /*
7437 * Commit the memory.
7438 */
7439 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7440 if (rcStrict != VINF_SUCCESS)
7441 return rcStrict;
7442
7443 iemHlpUsedFpu(pVCpu);
7444 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7445 return VINF_SUCCESS;
7446}
7447
7448
7449/**
7450 * Implements 'XSAVE'.
7451 *
7452 * @param iEffSeg The effective segment.
7453 * @param GCPtrEff The address of the image.
7454 * @param enmEffOpSize The operand size (only REX.W really matters).
7455 */
7456IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7457{
7458 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7459
7460 /*
7461 * Raise exceptions.
7462 */
7463 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7464 return iemRaiseUndefinedOpcode(pVCpu);
7465 if (pCtx->cr0 & X86_CR0_TS)
7466 return iemRaiseDeviceNotAvailable(pVCpu);
7467 if (GCPtrEff & 63)
7468 {
7469 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7470 * all/any misalignment sizes; Intel says it's an implementation detail. */
7471 if ( (pCtx->cr0 & X86_CR0_AM)
7472 && pCtx->eflags.Bits.u1AC
7473 && pVCpu->iem.s.uCpl == 3)
7474 return iemRaiseAlignmentCheckException(pVCpu);
7475 return iemRaiseGeneralProtectionFault0(pVCpu);
7476 }
7477
7478 /*
7479 * Calc the requested mask
7480 */
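    /* The requested component mask is EDX:EAX masked by XCR0 (the RFBM of the
       SDM); only the x87, SSE and YMM components are implemented here, as the
       assertion below enforces. */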
7481 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7482 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7483 uint64_t const fXInUse = pCtx->aXcr[0];
7484
7485/** @todo figure out the exact protocol for the memory access. Currently we
7486 * just need this crap to work halfway to make it possible to test
7487 * AVX instructions. */
7488/** @todo figure out the XINUSE and XMODIFIED */
7489
7490 /*
7491 * Access the x87 memory state.
7492 */
7493 /* The x87+SSE state. */
7494 void *pvMem512;
7495 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7496 if (rcStrict != VINF_SUCCESS)
7497 return rcStrict;
7498 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7499 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7500
7501 /* The header. */
7502 PX86XSAVEHDR pHdr;
7503 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
7504 if (rcStrict != VINF_SUCCESS)
7505 return rcStrict;
7506
7507 /*
7508 * Store the X87 state.
7509 */
7510 if (fReqComponents & XSAVE_C_X87)
7511 {
7512 /* common for all formats */
7513 pDst->FCW = pSrc->FCW;
7514 pDst->FSW = pSrc->FSW;
7515 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7516 pDst->FOP = pSrc->FOP;
7517 pDst->FPUIP = pSrc->FPUIP;
7518 pDst->CS = pSrc->CS;
7519 pDst->FPUDP = pSrc->FPUDP;
7520 pDst->DS = pSrc->DS;
7521 if (enmEffOpSize == IEMMODE_64BIT)
7522 {
7523 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7524 pDst->Rsrvd1 = pSrc->Rsrvd1;
7525 pDst->Rsrvd2 = pSrc->Rsrvd2;
7526 pDst->au32RsrvdForSoftware[0] = 0;
7527 }
7528 else
7529 {
7530 pDst->Rsrvd1 = 0;
7531 pDst->Rsrvd2 = 0;
7532 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7533 }
7534 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7535 {
7536 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7537 * them for now... */
7538 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7539 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7540 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7541 pDst->aRegs[i].au32[3] = 0;
7542 }
7543
7544 }
7545
7546 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7547 {
7548 pDst->MXCSR = pSrc->MXCSR;
7549 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7550 }
7551
7552 if (fReqComponents & XSAVE_C_SSE)
7553 {
7554 /* XMM registers. */
7555 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7556 for (uint32_t i = 0; i < cXmmRegs; i++)
7557 pDst->aXMM[i] = pSrc->aXMM[i];
7558 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7559 * right? */
7560 }
7561
7562 /* Commit the x87 state bits. (probably wrong) */
7563 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7564 if (rcStrict != VINF_SUCCESS)
7565 return rcStrict;
7566
7567 /*
7568 * Store AVX state.
7569 */
7570 if (fReqComponents & XSAVE_C_YMM)
7571 {
7572 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7573 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7574 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
7575 PX86XSAVEYMMHI pCompDst;
7576 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
7577 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7578 if (rcStrict != VINF_SUCCESS)
7579 return rcStrict;
7580
7581 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7582 for (uint32_t i = 0; i < cXmmRegs; i++)
7583 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
7584
7585 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7586 if (rcStrict != VINF_SUCCESS)
7587 return rcStrict;
7588 }
7589
7590 /*
7591 * Update the header.
7592 */
7593 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
7594 | (fReqComponents & fXInUse);
7595
7596 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
7597 if (rcStrict != VINF_SUCCESS)
7598 return rcStrict;
7599
7600 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7601 return VINF_SUCCESS;
7602}
7603
7604
7605/**
7606 * Implements 'XRSTOR'.
7607 *
7608 * @param iEffSeg The effective segment.
7609 * @param GCPtrEff The address of the image.
7610 * @param enmEffOpSize The operand size (only REX.W really matters).
7611 */
7612IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7613{
7614 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7615
7616 /*
7617 * Raise exceptions.
7618 */
7619 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7620 return iemRaiseUndefinedOpcode(pVCpu);
7621 if (pCtx->cr0 & X86_CR0_TS)
7622 return iemRaiseDeviceNotAvailable(pVCpu);
7623 if (GCPtrEff & 63)
7624 {
7625 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7626 * all/any misalignment sizes; Intel says it's an implementation detail. */
7627 if ( (pCtx->cr0 & X86_CR0_AM)
7628 && pCtx->eflags.Bits.u1AC
7629 && pVCpu->iem.s.uCpl == 3)
7630 return iemRaiseAlignmentCheckException(pVCpu);
7631 return iemRaiseGeneralProtectionFault0(pVCpu);
7632 }
7633
7634/** @todo figure out the exact protocol for the memory access. Currently we
7635 * just need this crap to work halfway to make it possible to test
7636 * AVX instructions. */
7637/** @todo figure out the XINUSE and XMODIFIED */
7638
7639 /*
7640 * Access the x87 memory state.
7641 */
7642 /* The x87+SSE state. */
7643 void *pvMem512;
7644 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7645 if (rcStrict != VINF_SUCCESS)
7646 return rcStrict;
7647 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7648 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7649
7650 /*
7651 * Calc the requested mask
7652 */
7653 PX86XSAVEHDR pHdrDst = &pCtx->CTX_SUFF(pXState)->Hdr;
7654 PCX86XSAVEHDR pHdrSrc;
7655 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
7656 if (rcStrict != VINF_SUCCESS)
7657 return rcStrict;
7658
7659 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7660 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7661 //uint64_t const fXInUse = pCtx->aXcr[0];
7662 uint64_t const fRstorMask = pHdrSrc->bmXState;
7663 uint64_t const fCompMask = pHdrSrc->bmXComp;
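    /* XSTATE_BV (bmXState) from the saved header says which components hold
       valid data in the image; requested components not marked there are reset
       to their initial values further down. */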
7664
7665 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7666
7667 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7668
7669 /* We won't need this any longer. */
7670 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
7671 if (rcStrict != VINF_SUCCESS)
7672 return rcStrict;
7673
7674 /*
7675 * Store the X87 state.
7676 */
7677 if (fReqComponents & XSAVE_C_X87)
7678 {
7679 if (fRstorMask & XSAVE_C_X87)
7680 {
7681 pDst->FCW = pSrc->FCW;
7682 pDst->FSW = pSrc->FSW;
7683 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7684 pDst->FOP = pSrc->FOP;
7685 pDst->FPUIP = pSrc->FPUIP;
7686 pDst->CS = pSrc->CS;
7687 pDst->FPUDP = pSrc->FPUDP;
7688 pDst->DS = pSrc->DS;
7689 if (enmEffOpSize == IEMMODE_64BIT)
7690 {
7691 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7692 pDst->Rsrvd1 = pSrc->Rsrvd1;
7693 pDst->Rsrvd2 = pSrc->Rsrvd2;
7694 }
7695 else
7696 {
7697 pDst->Rsrvd1 = 0;
7698 pDst->Rsrvd2 = 0;
7699 }
7700 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7701 {
7702 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7703 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7704 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7705 pDst->aRegs[i].au32[3] = 0;
7706 }
7707 }
7708 else
7709 {
7710 pDst->FCW = 0x37f;
7711 pDst->FSW = 0;
7712 pDst->FTW = 0x00; /* 0 - empty. */
7713 pDst->FPUDP = 0;
7714 pDst->DS = 0; //??
7715 pDst->Rsrvd2= 0;
7716 pDst->FPUIP = 0;
7717 pDst->CS = 0; //??
7718 pDst->Rsrvd1= 0;
7719 pDst->FOP = 0;
7720 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7721 {
7722 pDst->aRegs[i].au32[0] = 0;
7723 pDst->aRegs[i].au32[1] = 0;
7724 pDst->aRegs[i].au32[2] = 0;
7725 pDst->aRegs[i].au32[3] = 0;
7726 }
7727 }
7728 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
7729 }
7730
7731 /* MXCSR */
7732 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7733 {
7734 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
7735 pDst->MXCSR = pSrc->MXCSR;
7736 else
7737 pDst->MXCSR = 0x1f80;
7738 }
7739
7740 /* XMM registers. */
7741 if (fReqComponents & XSAVE_C_SSE)
7742 {
7743 if (fRstorMask & XSAVE_C_SSE)
7744 {
7745 for (uint32_t i = 0; i < cXmmRegs; i++)
7746 pDst->aXMM[i] = pSrc->aXMM[i];
7747 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7748 * right? */
7749 }
7750 else
7751 {
7752 for (uint32_t i = 0; i < cXmmRegs; i++)
7753 {
7754 pDst->aXMM[i].au64[0] = 0;
7755 pDst->aXMM[i].au64[1] = 0;
7756 }
7757 }
7758 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
7759 }
7760
7761 /* Unmap the x87 state bits (so we don't run out of mappings). */
7762 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7763 if (rcStrict != VINF_SUCCESS)
7764 return rcStrict;
7765
7766 /*
7767 * Restore AVX state.
7768 */
7769 if (fReqComponents & XSAVE_C_YMM)
7770 {
7771 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7772 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
7773
7774 if (fRstorMask & XSAVE_C_YMM)
7775 {
7776 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7777 PCX86XSAVEYMMHI pCompSrc;
7778 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
7779 iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
7780 if (rcStrict != VINF_SUCCESS)
7781 return rcStrict;
7782
7783 for (uint32_t i = 0; i < cXmmRegs; i++)
7784 {
7785 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
7786 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
7787 }
7788
7789 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
7790 if (rcStrict != VINF_SUCCESS)
7791 return rcStrict;
7792 }
7793 else
7794 {
7795 for (uint32_t i = 0; i < cXmmRegs; i++)
7796 {
7797 pCompDst->aYmmHi[i].au64[0] = 0;
7798 pCompDst->aYmmHi[i].au64[1] = 0;
7799 }
7800 }
7801 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
7802 }
7803
7804 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7805 return VINF_SUCCESS;
7806}
7807
7808
7809
7810
7811/**
7812 * Implements 'STMXCSR'.
7813 *
7814 * @param iEffSeg The effective segment register for @a GCPtrEff.
     * @param GCPtrEff The address of the image.
7815 */
7816IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7817{
7818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7819
7820 /*
7821 * Raise exceptions.
7822 */
7823 if ( !(pCtx->cr0 & X86_CR0_EM)
7824 && (pCtx->cr4 & X86_CR4_OSFXSR))
7825 {
7826 if (!(pCtx->cr0 & X86_CR0_TS))
7827 {
7828 /*
7829 * Do the job.
7830 */
7831 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7832 if (rcStrict == VINF_SUCCESS)
7833 {
7834 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7835 return VINF_SUCCESS;
7836 }
7837 return rcStrict;
7838 }
7839 return iemRaiseDeviceNotAvailable(pVCpu);
7840 }
7841 return iemRaiseUndefinedOpcode(pVCpu);
7842}
7843
7844
7845/**
7846 * Implements 'VSTMXCSR'.
7847 *
7848 * @param iEffSeg The effective segment register for @a GCPtrEff.
     * @param GCPtrEff The address of the image.
7849 */
7850IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7851{
7852 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7853
7854 /*
7855 * Raise exceptions.
7856 */
7857 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
7858 ? (pCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
7859 : !(pCtx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
7860 && (pCtx->cr4 & X86_CR4_OSXSAVE))
7861 {
7862 if (!(pCtx->cr0 & X86_CR0_TS))
7863 {
7864 /*
7865 * Do the job.
7866 */
7867 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7868 if (rcStrict == VINF_SUCCESS)
7869 {
7870 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7871 return VINF_SUCCESS;
7872 }
7873 return rcStrict;
7874 }
7875 return iemRaiseDeviceNotAvailable(pVCpu);
7876 }
7877 return iemRaiseUndefinedOpcode(pVCpu);
7878}
7879
7880
7881/**
7882 * Implements 'LDMXCSR'.
7883 *
7884 * @param iEffSeg The effective segment register for @a GCPtrEff.
     * @param GCPtrEff The address of the image.
7885 */
7886IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7887{
7888 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7889
7890 /*
7891 * Raise exceptions.
7892 */
7893 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
7894 * happen after or before \#UD and \#EM? */
7895 if ( !(pCtx->cr0 & X86_CR0_EM)
7896 && (pCtx->cr4 & X86_CR4_OSFXSR))
7897 {
7898 if (!(pCtx->cr0 & X86_CR0_TS))
7899 {
7900 /*
7901 * Do the job.
7902 */
7903 uint32_t fNewMxCsr;
7904 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
7905 if (rcStrict == VINF_SUCCESS)
7906 {
7907 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7908 if (!(fNewMxCsr & ~fMxCsrMask))
7909 {
7910 pCtx->CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr;
7911 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7912 return VINF_SUCCESS;
7913 }
7914 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
7915 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
7916 return iemRaiseGeneralProtectionFault0(pVCpu);
7917 }
7918 return rcStrict;
7919 }
7920 return iemRaiseDeviceNotAvailable(pVCpu);
7921 }
7922 return iemRaiseUndefinedOpcode(pVCpu);
7923}
7924
7925
7926/**
7927 * Common routine for fnstenv and fnsave.
7928 *
7929 * @param uPtr Where to store the state.
7930 * @param pCtx The CPU context.
7931 */
7932static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
7933{
7934 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
7935 if (enmEffOpSize == IEMMODE_16BIT)
7936 {
7937 uPtr.pu16[0] = pSrcX87->FCW;
7938 uPtr.pu16[1] = pSrcX87->FSW;
7939 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
7940 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7941 {
7942 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
7943 * protected mode or long mode and we save it in real mode? And vice
7944 * versa? And with 32-bit operand size? I think CPU is storing the
7945 * effective address ((CS << 4) + IP) in the offset register and not
7946 * doing any address calculations here. */
7947 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
7948 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
7949 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
7950 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
7951 }
7952 else
7953 {
7954 uPtr.pu16[3] = pSrcX87->FPUIP;
7955 uPtr.pu16[4] = pSrcX87->CS;
7956 uPtr.pu16[5] = pSrcX87->FPUDP;
7957 uPtr.pu16[6] = pSrcX87->DS;
7958 }
7959 }
7960 else
7961 {
7962 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
7963 uPtr.pu16[0*2] = pSrcX87->FCW;
7964 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
7965 uPtr.pu16[1*2] = pSrcX87->FSW;
7966 uPtr.pu16[1*2+1] = 0xffff;
7967 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
7968 uPtr.pu16[2*2+1] = 0xffff;
7969 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7970 {
7971 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
7972 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
7973 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
7974 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
7975 }
7976 else
7977 {
7978 uPtr.pu32[3] = pSrcX87->FPUIP;
7979 uPtr.pu16[4*2] = pSrcX87->CS;
7980 uPtr.pu16[4*2+1] = pSrcX87->FOP;
7981 uPtr.pu32[5] = pSrcX87->FPUDP;
7982 uPtr.pu16[6*2] = pSrcX87->DS;
7983 uPtr.pu16[6*2+1] = 0xffff;
7984 }
7985 }
7986}
7987
7988
7989/**
7990 * Common routine for fldenv and frstor.
7991 *
7992 * @param uPtr Where to load the state from.
7993 * @param pCtx The CPU context.
7994 */
7995static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
7996{
7997 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
7998 if (enmEffOpSize == IEMMODE_16BIT)
7999 {
8000 pDstX87->FCW = uPtr.pu16[0];
8001 pDstX87->FSW = uPtr.pu16[1];
8002 pDstX87->FTW = uPtr.pu16[2];
8003 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8004 {
8005 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
8006 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
8007 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
8008 pDstX87->CS = 0;
8009 pDstX87->Rsrvd1= 0;
8010 pDstX87->DS = 0;
8011 pDstX87->Rsrvd2= 0;
8012 }
8013 else
8014 {
8015 pDstX87->FPUIP = uPtr.pu16[3];
8016 pDstX87->CS = uPtr.pu16[4];
8017 pDstX87->Rsrvd1= 0;
8018 pDstX87->FPUDP = uPtr.pu16[5];
8019 pDstX87->DS = uPtr.pu16[6];
8020 pDstX87->Rsrvd2= 0;
8021 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
8022 }
8023 }
8024 else
8025 {
8026 pDstX87->FCW = uPtr.pu16[0*2];
8027 pDstX87->FSW = uPtr.pu16[1*2];
8028 pDstX87->FTW = uPtr.pu16[2*2];
8029 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8030 {
8031 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
8032 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
8033 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
8034 pDstX87->CS = 0;
8035 pDstX87->Rsrvd1= 0;
8036 pDstX87->DS = 0;
8037 pDstX87->Rsrvd2= 0;
8038 }
8039 else
8040 {
8041 pDstX87->FPUIP = uPtr.pu32[3];
8042 pDstX87->CS = uPtr.pu16[4*2];
8043 pDstX87->Rsrvd1= 0;
8044 pDstX87->FOP = uPtr.pu16[4*2+1];
8045 pDstX87->FPUDP = uPtr.pu32[5];
8046 pDstX87->DS = uPtr.pu16[6*2];
8047 pDstX87->Rsrvd2= 0;
8048 }
8049 }
8050
8051 /* Make adjustments. */
8052 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
8053 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
8054 iemFpuRecalcExceptionStatus(pDstX87);
8055 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
8056 * exceptions are pending after loading the saved state? */
8057}
8058
8059
8060/**
8061 * Implements 'FNSTENV'.
8062 *
8063 * @param enmEffOpSize The operand size (only REX.W really matters).
8064 * @param iEffSeg The effective segment register for @a GCPtrEff.
8065 * @param GCPtrEffDst The address of the image.
8066 */
8067IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8068{
8069 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8070 RTPTRUNION uPtr;
8071 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8072 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8073 if (rcStrict != VINF_SUCCESS)
8074 return rcStrict;
8075
8076 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8077
8078 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8079 if (rcStrict != VINF_SUCCESS)
8080 return rcStrict;
8081
8082 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8083 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8084 return VINF_SUCCESS;
8085}
8086
8087
8088/**
8089 * Implements 'FNSAVE'.
8090 *
8091 * @param GCPtrEffDst The address of the image.
8092 * @param enmEffOpSize The operand size.
     * @param iEffSeg The effective segment register for @a GCPtrEffDst.
8093 */
8094IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8095{
8096 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8097 RTPTRUNION uPtr;
8098 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8099 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8100 if (rcStrict != VINF_SUCCESS)
8101 return rcStrict;
8102
8103 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8104 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8105 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8106 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8107 {
8108 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
8109 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
8110 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
8111 }
8112
8113 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8114 if (rcStrict != VINF_SUCCESS)
8115 return rcStrict;
8116
8117 /*
8118 * Re-initialize the FPU context.
8119 */
8120 pFpuCtx->FCW = 0x37f;
8121 pFpuCtx->FSW = 0;
8122 pFpuCtx->FTW = 0x00; /* 0 - empty */
8123 pFpuCtx->FPUDP = 0;
8124 pFpuCtx->DS = 0;
8125 pFpuCtx->Rsrvd2= 0;
8126 pFpuCtx->FPUIP = 0;
8127 pFpuCtx->CS = 0;
8128 pFpuCtx->Rsrvd1= 0;
8129 pFpuCtx->FOP = 0;
8130
8131 iemHlpUsedFpu(pVCpu);
8132 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8133 return VINF_SUCCESS;
8134}
8135
8136
8137
8138/**
8139 * Implements 'FLDENV'.
8140 *
8141 * @param enmEffOpSize The operand size (only REX.W really matters).
8142 * @param iEffSeg The effective segment register for @a GCPtrEff.
8143 * @param GCPtrEffSrc The address of the image.
8144 */
8145IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8146{
8147 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8148 RTCPTRUNION uPtr;
8149 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8150 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8151 if (rcStrict != VINF_SUCCESS)
8152 return rcStrict;
8153
8154 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8155
8156 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8157 if (rcStrict != VINF_SUCCESS)
8158 return rcStrict;
8159
8160 iemHlpUsedFpu(pVCpu);
8161 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8162 return VINF_SUCCESS;
8163}
8164
8165
8166/**
8167 * Implements 'FRSTOR'.
8168 *
8169 * @param GCPtrEffSrc The address of the image.
8170 * @param enmEffOpSize The operand size.
     * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
8171 */
8172IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8173{
8174 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8175 RTCPTRUNION uPtr;
8176 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8177 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8178 if (rcStrict != VINF_SUCCESS)
8179 return rcStrict;
8180
8181 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8182 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8183 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8184 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8185 {
8186 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
8187 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
8188 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
8189 pFpuCtx->aRegs[i].au32[3] = 0;
8190 }
8191
8192 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8193 if (rcStrict != VINF_SUCCESS)
8194 return rcStrict;
8195
8196 iemHlpUsedFpu(pVCpu);
8197 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8198 return VINF_SUCCESS;
8199}
8200
8201
8202/**
8203 * Implements 'FLDCW'.
8204 *
8205 * @param u16Fcw The new FCW.
8206 */
8207IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
8208{
8209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8210
8211 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
8212 /** @todo Testcase: Try to see what happens when trying to set undefined bits
8213 * (other than 6 and 7). Currently ignoring them. */
8214 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
8215 * according to FSW. (This is what is currently implemented.) */
8216 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8217 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
8218 iemFpuRecalcExceptionStatus(pFpuCtx);
8219
8220 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8221 iemHlpUsedFpu(pVCpu);
8222 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8223 return VINF_SUCCESS;
8224}
8225
8226
8227
8228/**
8229 * Implements the underflow case of fxch.
8230 *
8231 * @param iStReg The other stack register.
8232 */
8233IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
8234{
8235 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8236
8237 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8238 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
8239 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8240 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
8241
8242 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
8243 * registers are read as QNaN and then exchanged. This could be
8244 * wrong... */
8245 if (pFpuCtx->FCW & X86_FCW_IM)
8246 {
8247 if (RT_BIT(iReg1) & pFpuCtx->FTW)
8248 {
8249 if (RT_BIT(iReg2) & pFpuCtx->FTW)
8250 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8251 else
8252 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
8253 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
8254 }
8255 else
8256 {
8257 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
8258 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8259 }
8260 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8261 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8262 }
8263 else
8264 {
8265 /* raise underflow exception, don't change anything. */
8266 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
8267 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8268 }
8269
8270 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8271 iemHlpUsedFpu(pVCpu);
8272 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8273 return VINF_SUCCESS;
8274}
8275
8276
8277/**
8278 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
8279 *
8280 * @param iStReg The other stack register (compared against ST0).
     * @param pfnAImpl Pointer to the comparison worker returning EFLAGS.
     * @param fPop Whether to pop ST0 afterwards (the FCOMIP/FUCOMIP variants).
8281 */
8282IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
8283{
8284 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8285 Assert(iStReg < 8);
8286
8287 /*
8288 * Raise exceptions.
8289 */
8290 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
8291 return iemRaiseDeviceNotAvailable(pVCpu);
8292
8293 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8294 uint16_t u16Fsw = pFpuCtx->FSW;
8295 if (u16Fsw & X86_FSW_ES)
8296 return iemRaiseMathFault(pVCpu);
8297
8298 /*
8299 * Check if any of the register accesses causes #SF + #IA.
8300 */
8301 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
8302 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
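    /* Both ST(0) and ST(iStReg) must be tagged valid for the comparison;
       otherwise this is a stack underflow, handled below according to FCW.IM. */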
8303 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
8304 {
8305 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
8307
8308 pFpuCtx->FSW &= ~X86_FSW_C1;
8309 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
8310 if ( !(u16Fsw & X86_FSW_IE)
8311 || (pFpuCtx->FCW & X86_FCW_IM) )
8312 {
8313 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8314 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8315 }
8316 }
8317 else if (pFpuCtx->FCW & X86_FCW_IM)
8318 {
8319 /* Masked underflow. */
8320 pFpuCtx->FSW &= ~X86_FSW_C1;
8321 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8322 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8323 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
8324 }
8325 else
8326 {
8327 /* Raise underflow - don't touch EFLAGS or TOP. */
8328 pFpuCtx->FSW &= ~X86_FSW_C1;
8329 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8330 fPop = false;
8331 }
8332
8333 /*
8334 * Pop if necessary.
8335 */
8336 if (fPop)
8337 {
8338 pFpuCtx->FTW &= ~RT_BIT(iReg1);
8339 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
8340 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
8341 }
8342
8343 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8344 iemHlpUsedFpu(pVCpu);
8345 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8346 return VINF_SUCCESS;
8347}
8348
8349/** @} */
8350