VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@71417

Last change on this file since 71417 was 71416, checked in by vboxsync, 7 years ago

VMM/IEM: Nested Hw.virt: Fix exitinfo1 field for SVM_CTRL_INTERCEPT_CR0_SEL_WRITE intercept.

1/* $Id: IEMAllCImpl.cpp.h 71416 2018-03-21 09:30:14Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifdef VBOX_WITH_NESTED_HWVIRT
19# include "IEMAllCImplSvmInstr.cpp.h"
20#endif
21
22/** @name Misc Helpers
23 * @{
24 */
25
26
27/**
28 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
29 *
30 * @returns Strict VBox status code.
31 *
32 * @param pVCpu The cross context virtual CPU structure of the calling thread.
33 * @param pCtx The register context.
34 * @param u16Port The port number.
35 * @param cbOperand The operand size.
36 */
37static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
38{
39 /* The TSS bits we're interested in are the same on 386 and AMD64. */
40 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
41 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
42 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
43 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
44
45 /*
46 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
47 */
48 Assert(!pCtx->tr.Attr.n.u1DescType);
49 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
50 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
51 {
52 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
53 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
54 return iemRaiseGeneralProtectionFault0(pVCpu);
55 }
56
57 /*
58 * Read the bitmap offset (may #PF).
59 */
60 uint16_t offBitmap;
61 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
62 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
63 if (rcStrict != VINF_SUCCESS)
64 {
65 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
66 return rcStrict;
67 }
68
69 /*
70 * We need the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
71 * describes the CPU as actually reading two bytes regardless of whether the
72 * bit range crosses a byte boundary. Thus the + 1 in the test below.
73 */
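 /* Example: for u16Port=0x3f9 and offBitmap=0x88, the first interesting bit lives
  * in byte 0x3f9 / 8 = 0x7f of the bitmap, i.e. at TSS offset 0x88 + 0x7f = 0x107;
  * the bit index within that byte is 0x3f9 & 7 = 1 (used in the check further down). */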
74 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
75 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
76 * for instance, sizeof(X86TSS32). */
77 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
78 {
79 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
80 offFirstBit, pCtx->tr.u32Limit));
81 return iemRaiseGeneralProtectionFault0(pVCpu);
82 }
83
84 /*
85 * Read the necessary bits.
86 */
87 /** @todo Test the assertion in the Intel manual that the CPU reads two
88 * bytes. The question is how this works wrt #PF and #GP on the
89 * 2nd byte when it's not required. */
90 uint16_t bmBytes = UINT16_MAX;
91 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
92 if (rcStrict != VINF_SUCCESS)
93 {
94 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
95 return rcStrict;
96 }
97
98 /*
99 * Perform the check.
100 */
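 /* Example: with cbOperand=2 the mask is 0x3; for u16Port=0x3f9 the two bytes read
  * above get shifted right by 1 (0x3f9 & 7), so the bits for ports 0x3f9 and 0x3fa
  * line up with the mask.  Any set bit among them denies the access. */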
101 uint16_t fPortMask = (1 << cbOperand) - 1;
102 bmBytes >>= (u16Port & 7);
103 if (bmBytes & fPortMask)
104 {
105 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
106 u16Port, cbOperand, bmBytes, fPortMask));
107 return iemRaiseGeneralProtectionFault0(pVCpu);
108 }
109
110 return VINF_SUCCESS;
111}
112
113
114/**
115 * Checks if we are allowed to access the given I/O port, raising the
116 * appropriate exceptions if we aren't (or if the I/O bitmap is not
117 * accessible).
118 *
119 * @returns Strict VBox status code.
120 *
121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
122 * @param pCtx The register context.
123 * @param u16Port The port number.
124 * @param cbOperand The operand size.
125 */
126DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
127{
128 X86EFLAGS Efl;
129 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
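 /* The I/O bitmap only needs consulting in protected mode when CPL > IOPL, or in
  * V8086 mode; in real mode and for sufficiently privileged protected mode code,
  * IN/OUT is always permitted. */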
130 if ( (pCtx->cr0 & X86_CR0_PE)
131 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
132 || Efl.Bits.u1VM) )
133 return iemHlpCheckPortIOPermissionBitmap(pVCpu, pCtx, u16Port, cbOperand);
134 return VINF_SUCCESS;
135}
136
137
138#if 0
139/**
140 * Calculates the parity bit.
141 *
142 * @returns true if the bit is set, false if not.
143 * @param u8Result The least significant byte of the result.
144 */
145static bool iemHlpCalcParityFlag(uint8_t u8Result)
146{
147 /*
148 * Parity is set if the number of bits in the least significant byte of
149 * the result is even.
150 */
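 /* (On GCC/Clang this is equivalent to !(__builtin_popcount(u8Result) & 1); the
  * unrolled shifts below avoid relying on a compiler intrinsic.) */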
151 uint8_t cBits;
152 cBits = u8Result & 1; /* 0 */
153 u8Result >>= 1;
154 cBits += u8Result & 1;
155 u8Result >>= 1;
156 cBits += u8Result & 1;
157 u8Result >>= 1;
158 cBits += u8Result & 1;
159 u8Result >>= 1;
160 cBits += u8Result & 1; /* 4 */
161 u8Result >>= 1;
162 cBits += u8Result & 1;
163 u8Result >>= 1;
164 cBits += u8Result & 1;
165 u8Result >>= 1;
166 cBits += u8Result & 1;
167 return !(cBits & 1);
168}
169#endif /* not used */
170
171
172/**
173 * Updates the specified flags according to an 8-bit result.
174 *
175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
176 * @param u8Result The result to set the flags according to.
177 * @param fToUpdate The flags to update.
178 * @param fUndefined The flags that are specified as undefined.
179 */
180static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
181{
182 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
183
184 uint32_t fEFlags = pCtx->eflags.u;
185 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
186 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
187 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
188#ifdef IEM_VERIFICATION_MODE_FULL
189 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
190#endif
191}
192
193
194/**
195 * Updates the specified flags according to a 16-bit result.
196 *
197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
198 * @param u16Result The result to set the flags according to.
199 * @param fToUpdate The flags to update.
200 * @param fUndefined The flags that are specified as undefined.
201 */
202static void iemHlpUpdateArithEFlagsU16(PVMCPU pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
203{
204 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
205
206 uint32_t fEFlags = pCtx->eflags.u;
207 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
208 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
209 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
210#ifdef IEM_VERIFICATION_MODE_FULL
211 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
212#endif
213}
214
215
216/**
217 * Helper used by iret.
218 *
219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
220 * @param uCpl The new CPL.
221 * @param pSReg Pointer to the segment register.
222 */
223static void iemHlpAdjustSelectorForNewCpl(PVMCPU pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
224{
225#ifdef VBOX_WITH_RAW_MODE_NOT_R0
226 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
227 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
228#else
229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
230#endif
231
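 /* If the new CPL is greater than the DPL of a data segment or non-conforming code
  * segment held in the register, the register must be left with a NULL selector;
  * conforming code segments are exempt.  That is what the check below implements. */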
232 if ( uCpl > pSReg->Attr.n.u2Dpl
233 && pSReg->Attr.n.u1DescType /* code or data, not system */
234 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
235 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
236 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
237}
238
239
240/**
241 * Indicates that we have modified the FPU state.
242 *
243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
244 */
245DECLINLINE(void) iemHlpUsedFpu(PVMCPU pVCpu)
246{
247 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
248}
249
250/** @} */
251
252/** @name C Implementations
253 * @{
254 */
255
256/**
257 * Implements a 16-bit popa.
258 */
259IEM_CIMPL_DEF_0(iemCImpl_popa_16)
260{
261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
262 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
263 RTGCPTR GCPtrLast = GCPtrStart + 15;
264 VBOXSTRICTRC rcStrict;
265
266 /*
267 * The docs are a bit hard to comprehend here, but it looks like we wrap
268 * around in real mode as long as none of the individual pops crosses the
269 * end of the stack segment. In protected mode we check the whole access
270 * in one go. For efficiency, only do the word-by-word thing if we're in
271 * danger of wrapping around.
272 */
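 /* Example of the wrap-around case: in real mode with SP=0xfff8 and the usual 0xffff
  * limit, GCPtrLast ends up beyond the limit, so the word-by-word path below is taken
  * and each individual 16-bit pop wraps SP around within the segment. */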
273 /** @todo do popa boundary / wrap-around checks. */
274 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
275 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
276 {
277 /* word-by-word */
278 RTUINT64U TmpRsp;
279 TmpRsp.u = pCtx->rsp;
280 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->di, &TmpRsp);
281 if (rcStrict == VINF_SUCCESS)
282 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->si, &TmpRsp);
283 if (rcStrict == VINF_SUCCESS)
284 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bp, &TmpRsp);
285 if (rcStrict == VINF_SUCCESS)
286 {
287 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
288 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bx, &TmpRsp);
289 }
290 if (rcStrict == VINF_SUCCESS)
291 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->dx, &TmpRsp);
292 if (rcStrict == VINF_SUCCESS)
293 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->cx, &TmpRsp);
294 if (rcStrict == VINF_SUCCESS)
295 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->ax, &TmpRsp);
296 if (rcStrict == VINF_SUCCESS)
297 {
298 pCtx->rsp = TmpRsp.u;
299 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
300 }
301 }
302 else
303 {
304 uint16_t const *pa16Mem = NULL;
305 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
306 if (rcStrict == VINF_SUCCESS)
307 {
308 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
309 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
310 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
311 /* skip sp */
312 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
313 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
314 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
315 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
316 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
317 if (rcStrict == VINF_SUCCESS)
318 {
319 iemRegAddToRsp(pVCpu, pCtx, 16);
320 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
321 }
322 }
323 }
324 return rcStrict;
325}
326
327
328/**
329 * Implements a 32-bit popa.
330 */
331IEM_CIMPL_DEF_0(iemCImpl_popa_32)
332{
333 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
334 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
335 RTGCPTR GCPtrLast = GCPtrStart + 31;
336 VBOXSTRICTRC rcStrict;
337
338 /*
339 * The docs are a bit hard to comprehend here, but it looks like we wrap
340 * around in real mode as long as none of the individual pops crosses the
341 * end of the stack segment. In protected mode we check the whole access
342 * in one go. For efficiency, only do the word-by-word thing if we're in
343 * danger of wrapping around.
344 */
345 /** @todo do popa boundary / wrap-around checks. */
346 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
347 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
348 {
349 /* word-by-word */
350 RTUINT64U TmpRsp;
351 TmpRsp.u = pCtx->rsp;
352 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edi, &TmpRsp);
353 if (rcStrict == VINF_SUCCESS)
354 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->esi, &TmpRsp);
355 if (rcStrict == VINF_SUCCESS)
356 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebp, &TmpRsp);
357 if (rcStrict == VINF_SUCCESS)
358 {
359 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
360 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebx, &TmpRsp);
361 }
362 if (rcStrict == VINF_SUCCESS)
363 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edx, &TmpRsp);
364 if (rcStrict == VINF_SUCCESS)
365 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ecx, &TmpRsp);
366 if (rcStrict == VINF_SUCCESS)
367 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->eax, &TmpRsp);
368 if (rcStrict == VINF_SUCCESS)
369 {
370#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
371 pCtx->rdi &= UINT32_MAX;
372 pCtx->rsi &= UINT32_MAX;
373 pCtx->rbp &= UINT32_MAX;
374 pCtx->rbx &= UINT32_MAX;
375 pCtx->rdx &= UINT32_MAX;
376 pCtx->rcx &= UINT32_MAX;
377 pCtx->rax &= UINT32_MAX;
378#endif
379 pCtx->rsp = TmpRsp.u;
380 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
381 }
382 }
383 else
384 {
385 uint32_t const *pa32Mem;
386 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
387 if (rcStrict == VINF_SUCCESS)
388 {
389 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
390 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
391 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
392 /* skip esp */
393 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
394 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
395 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
396 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
397 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
398 if (rcStrict == VINF_SUCCESS)
399 {
400 iemRegAddToRsp(pVCpu, pCtx, 32);
401 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
402 }
403 }
404 }
405 return rcStrict;
406}
407
408
409/**
410 * Implements a 16-bit pusha.
411 */
412IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
413{
414 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
415 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
416 RTGCPTR GCPtrBottom = GCPtrTop - 15;
417 VBOXSTRICTRC rcStrict;
418
419 /*
420 * The docs are a bit hard to comprehend here, but it looks like we wrap
421 * around in real mode as long as none of the individual pushes crosses the
422 * end of the stack segment. In protected mode we check the whole access
423 * in one go. For efficiency, only do the word-by-word thing if we're in
424 * danger of wrapping around.
425 */
426 /** @todo do pusha boundary / wrap-around checks. */
427 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
428 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
429 {
430 /* word-by-word */
431 RTUINT64U TmpRsp;
432 TmpRsp.u = pCtx->rsp;
433 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->ax, &TmpRsp);
434 if (rcStrict == VINF_SUCCESS)
435 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->cx, &TmpRsp);
436 if (rcStrict == VINF_SUCCESS)
437 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->dx, &TmpRsp);
438 if (rcStrict == VINF_SUCCESS)
439 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bx, &TmpRsp);
440 if (rcStrict == VINF_SUCCESS)
441 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->sp, &TmpRsp);
442 if (rcStrict == VINF_SUCCESS)
443 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bp, &TmpRsp);
444 if (rcStrict == VINF_SUCCESS)
445 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->si, &TmpRsp);
446 if (rcStrict == VINF_SUCCESS)
447 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->di, &TmpRsp);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 pCtx->rsp = TmpRsp.u;
451 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
452 }
453 }
454 else
455 {
456 GCPtrBottom--;
457 uint16_t *pa16Mem = NULL;
458 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
459 if (rcStrict == VINF_SUCCESS)
460 {
461 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
462 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
463 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
464 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
465 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
466 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
467 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
468 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
469 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
470 if (rcStrict == VINF_SUCCESS)
471 {
472 iemRegSubFromRsp(pVCpu, pCtx, 16);
473 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
474 }
475 }
476 }
477 return rcStrict;
478}
479
480
481/**
482 * Implements a 32-bit pusha.
483 */
484IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
485{
486 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
487 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
488 RTGCPTR GCPtrBottom = GCPtrTop - 31;
489 VBOXSTRICTRC rcStrict;
490
491 /*
492 * The docs are a bit hard to comprehend here, but it looks like we wrap
493 * around in real mode as long as none of the individual pushes crosses the
494 * end of the stack segment. In protected mode we check the whole access
495 * in one go. For efficiency, only do the word-by-word thing if we're in
496 * danger of wrapping around.
497 */
498 /** @todo do pusha boundary / wrap-around checks. */
499 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
500 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
501 {
502 /* word-by-word */
503 RTUINT64U TmpRsp;
504 TmpRsp.u = pCtx->rsp;
505 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->eax, &TmpRsp);
506 if (rcStrict == VINF_SUCCESS)
507 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ecx, &TmpRsp);
508 if (rcStrict == VINF_SUCCESS)
509 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edx, &TmpRsp);
510 if (rcStrict == VINF_SUCCESS)
511 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebx, &TmpRsp);
512 if (rcStrict == VINF_SUCCESS)
513 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esp, &TmpRsp);
514 if (rcStrict == VINF_SUCCESS)
515 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebp, &TmpRsp);
516 if (rcStrict == VINF_SUCCESS)
517 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esi, &TmpRsp);
518 if (rcStrict == VINF_SUCCESS)
519 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edi, &TmpRsp);
520 if (rcStrict == VINF_SUCCESS)
521 {
522 pCtx->rsp = TmpRsp.u;
523 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
524 }
525 }
526 else
527 {
528 GCPtrBottom--;
529 uint32_t *pa32Mem;
530 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
531 if (rcStrict == VINF_SUCCESS)
532 {
533 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
534 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
535 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
536 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
537 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
538 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
539 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
540 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
541 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
542 if (rcStrict == VINF_SUCCESS)
543 {
544 iemRegSubFromRsp(pVCpu, pCtx, 32);
545 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
546 }
547 }
548 }
549 return rcStrict;
550}
551
552
553/**
554 * Implements pushf.
555 *
557 * @param enmEffOpSize The effective operand size.
558 */
559IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
560{
561 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
562 VBOXSTRICTRC rcStrict;
563
564 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
565 {
566 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
567 IEM_SVM_UPDATE_NRIP(pVCpu);
568 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
569 }
570
571 /*
572 * If we're in V8086 mode some care is required (which is why we're
573 * doing this in a C implementation).
574 */
575 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
576 if ( (fEfl & X86_EFL_VM)
577 && X86_EFL_GET_IOPL(fEfl) != 3 )
578 {
579 Assert(pCtx->cr0 & X86_CR0_PE);
580 if ( enmEffOpSize != IEMMODE_16BIT
581 || !(pCtx->cr4 & X86_CR4_VME))
582 return iemRaiseGeneralProtectionFault0(pVCpu);
583 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
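 /* X86_EFL_VIF is bit 19 and X86_EFL_IF is bit 9, hence the shift by (19 - 9) below
  * to reflect the virtual interrupt flag into the IF position of the 16-bit image
  * being pushed. */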
584 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
585 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
586 }
587 else
588 {
589
590 /*
591 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
592 */
593 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
594
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
599 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
600 fEfl |= UINT16_C(0xf000);
601 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
602 break;
603 case IEMMODE_32BIT:
604 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
605 break;
606 case IEMMODE_64BIT:
607 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
608 break;
609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
610 }
611 }
612 if (rcStrict != VINF_SUCCESS)
613 return rcStrict;
614
615 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
616 return VINF_SUCCESS;
617}
618
619
620/**
621 * Implements popf.
622 *
623 * @param enmEffOpSize The effective operand size.
624 */
625IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
626{
627 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
628 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu, pCtx);
629 VBOXSTRICTRC rcStrict;
630 uint32_t fEflNew;
631
632 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
633 {
634 Log2(("popf: Guest intercept -> #VMEXIT\n"));
635 IEM_SVM_UPDATE_NRIP(pVCpu);
636 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
637 }
638
639 /*
640 * V8086 is special as usual.
641 */
642 if (fEflOld & X86_EFL_VM)
643 {
644 /*
645 * Almost anything goes if IOPL is 3.
646 */
647 if (X86_EFL_GET_IOPL(fEflOld) == 3)
648 {
649 switch (enmEffOpSize)
650 {
651 case IEMMODE_16BIT:
652 {
653 uint16_t u16Value;
654 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
655 if (rcStrict != VINF_SUCCESS)
656 return rcStrict;
657 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
658 break;
659 }
660 case IEMMODE_32BIT:
661 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
662 if (rcStrict != VINF_SUCCESS)
663 return rcStrict;
664 break;
665 IEM_NOT_REACHED_DEFAULT_CASE_RET();
666 }
667
668 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
669 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
670 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
671 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
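 /* Note that even with IOPL=3 in V8086 mode the IOPL field itself cannot be changed
  * here - it is masked out of the update set above - whereas IF can be modified. */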
672 }
673 /*
674 * Interrupt flag virtualization with CR4.VME=1.
675 */
676 else if ( enmEffOpSize == IEMMODE_16BIT
677 && (pCtx->cr4 & X86_CR4_VME) )
678 {
679 uint16_t u16Value;
680 RTUINT64U TmpRsp;
681 TmpRsp.u = pCtx->rsp;
682 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
683 if (rcStrict != VINF_SUCCESS)
684 return rcStrict;
685
686 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
687 * or before? */
688 if ( ( (u16Value & X86_EFL_IF)
689 && (fEflOld & X86_EFL_VIP))
690 || (u16Value & X86_EFL_TF) )
691 return iemRaiseGeneralProtectionFault0(pVCpu);
692
693 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
694 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
695 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
696 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
697
698 pCtx->rsp = TmpRsp.u;
699 }
700 else
701 return iemRaiseGeneralProtectionFault0(pVCpu);
702
703 }
704 /*
705 * Not in V8086 mode.
706 */
707 else
708 {
709 /* Pop the flags. */
710 switch (enmEffOpSize)
711 {
712 case IEMMODE_16BIT:
713 {
714 uint16_t u16Value;
715 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
716 if (rcStrict != VINF_SUCCESS)
717 return rcStrict;
718 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
719
720 /*
721 * Ancient CPU adjustments:
722 * - 8086, 80186, V20/30:
723 * Fixed bits 15:12 are not kept correctly internally, mostly for
724 * practical reasons (masking below). We add them when pushing flags.
725 * - 80286:
726 * The NT and IOPL flags cannot be popped from real mode and are
727 * therefore always zero (since a 286 can never exit from PM and
728 * their initial value is zero). This changed on a 386 and can
729 * therefore be used to detect a 286 or a 386 CPU in real mode.
730 */
731 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
732 && !(pCtx->cr0 & X86_CR0_PE) )
733 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
734 break;
735 }
736 case IEMMODE_32BIT:
737 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
738 if (rcStrict != VINF_SUCCESS)
739 return rcStrict;
740 break;
741 case IEMMODE_64BIT:
742 {
743 uint64_t u64Value;
744 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
745 if (rcStrict != VINF_SUCCESS)
746 return rcStrict;
747 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
748 break;
749 }
750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
751 }
752
753 /* Merge them with the current flags. */
754 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
755 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
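 /* Privilege dependent merge: CPL 0 may update both IOPL and IF, CPL <= IOPL may
  * update IF but not IOPL, and otherwise neither field is touched (no fault is
  * raised, the old bits are simply preserved). */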
756 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
757 || pVCpu->iem.s.uCpl == 0)
758 {
759 fEflNew &= fPopfBits;
760 fEflNew |= ~fPopfBits & fEflOld;
761 }
762 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
763 {
764 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
765 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
766 }
767 else
768 {
769 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
770 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
771 }
772 }
773
774 /*
775 * Commit the flags.
776 */
777 Assert(fEflNew & RT_BIT_32(1));
778 IEMMISC_SET_EFL(pVCpu, pCtx, fEflNew);
779 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
780
781 return VINF_SUCCESS;
782}
783
784
785/**
786 * Implements a 16-bit indirect call.
787 *
788 * @param uNewPC The new program counter (RIP) value (loaded from the
789 * operand).
790 * @param enmEffOpSize The effective operand size.
791 */
792IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
793{
794 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
795 uint16_t uOldPC = pCtx->ip + cbInstr;
796 if (uNewPC > pCtx->cs.u32Limit)
797 return iemRaiseGeneralProtectionFault0(pVCpu);
798
799 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
800 if (rcStrict != VINF_SUCCESS)
801 return rcStrict;
802
803 pCtx->rip = uNewPC;
804 pCtx->eflags.Bits.u1RF = 0;
805
806#ifndef IEM_WITH_CODE_TLB
807 /* Flush the prefetch buffer. */
808 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
809#endif
810 return VINF_SUCCESS;
811}
812
813
814/**
815 * Implements a 16-bit relative call.
816 *
817 * @param offDisp The displacement offset.
818 */
819IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
820{
821 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
822 uint16_t uOldPC = pCtx->ip + cbInstr;
823 uint16_t uNewPC = uOldPC + offDisp;
824 if (uNewPC > pCtx->cs.u32Limit)
825 return iemRaiseGeneralProtectionFault0(pVCpu);
826
827 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
828 if (rcStrict != VINF_SUCCESS)
829 return rcStrict;
830
831 pCtx->rip = uNewPC;
832 pCtx->eflags.Bits.u1RF = 0;
833
834#ifndef IEM_WITH_CODE_TLB
835 /* Flush the prefetch buffer. */
836 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
837#endif
838 return VINF_SUCCESS;
839}
840
841
842/**
843 * Implements a 32-bit indirect call.
844 *
845 * @param uNewPC The new program counter (RIP) value (loaded from the
846 * operand).
847 * @param enmEffOpSize The effective operand size.
848 */
849IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
850{
851 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
852 uint32_t uOldPC = pCtx->eip + cbInstr;
853 if (uNewPC > pCtx->cs.u32Limit)
854 return iemRaiseGeneralProtectionFault0(pVCpu);
855
856 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
857 if (rcStrict != VINF_SUCCESS)
858 return rcStrict;
859
860#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
861 /*
862 * CSAM hook for recording interesting indirect calls.
863 */
864 if ( !pCtx->eflags.Bits.u1IF
865 && (pCtx->cr0 & X86_CR0_PG)
866 && !CSAMIsEnabled(pVCpu->CTX_SUFF(pVM))
867 && pVCpu->iem.s.uCpl == 0)
868 {
869 EMSTATE enmState = EMGetState(pVCpu);
870 if ( enmState == EMSTATE_IEM_THEN_REM
871 || enmState == EMSTATE_IEM
872 || enmState == EMSTATE_REM)
873 CSAMR3RecordCallAddress(pVCpu->CTX_SUFF(pVM), pCtx->eip);
874 }
875#endif
876
877 pCtx->rip = uNewPC;
878 pCtx->eflags.Bits.u1RF = 0;
879
880#ifndef IEM_WITH_CODE_TLB
881 /* Flush the prefetch buffer. */
882 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
883#endif
884 return VINF_SUCCESS;
885}
886
887
888/**
889 * Implements a 32-bit relative call.
890 *
891 * @param offDisp The displacement offset.
892 */
893IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
894{
895 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
896 uint32_t uOldPC = pCtx->eip + cbInstr;
897 uint32_t uNewPC = uOldPC + offDisp;
898 if (uNewPC > pCtx->cs.u32Limit)
899 return iemRaiseGeneralProtectionFault0(pVCpu);
900
901 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
902 if (rcStrict != VINF_SUCCESS)
903 return rcStrict;
904
905 pCtx->rip = uNewPC;
906 pCtx->eflags.Bits.u1RF = 0;
907
908#ifndef IEM_WITH_CODE_TLB
909 /* Flush the prefetch buffer. */
910 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
911#endif
912 return VINF_SUCCESS;
913}
914
915
916/**
917 * Implements a 64-bit indirect call.
918 *
919 * @param uNewPC The new program counter (RIP) value (loaded from the
920 * operand).
921 * @param enmEffOpSize The effective operand size.
922 */
923IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
924{
925 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
926 uint64_t uOldPC = pCtx->rip + cbInstr;
927 if (!IEM_IS_CANONICAL(uNewPC))
928 return iemRaiseGeneralProtectionFault0(pVCpu);
929
930 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
931 if (rcStrict != VINF_SUCCESS)
932 return rcStrict;
933
934 pCtx->rip = uNewPC;
935 pCtx->eflags.Bits.u1RF = 0;
936
937#ifndef IEM_WITH_CODE_TLB
938 /* Flush the prefetch buffer. */
939 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
940#endif
941 return VINF_SUCCESS;
942}
943
944
945/**
946 * Implements a 64-bit relative call.
947 *
948 * @param offDisp The displacement offset.
949 */
950IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
951{
952 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
953 uint64_t uOldPC = pCtx->rip + cbInstr;
954 uint64_t uNewPC = uOldPC + offDisp;
955 if (!IEM_IS_CANONICAL(uNewPC))
956 return iemRaiseNotCanonical(pVCpu);
957
958 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
959 if (rcStrict != VINF_SUCCESS)
960 return rcStrict;
961
962 pCtx->rip = uNewPC;
963 pCtx->eflags.Bits.u1RF = 0;
964
965#ifndef IEM_WITH_CODE_TLB
966 /* Flush the prefetch buffer. */
967 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
968#endif
969
970 return VINF_SUCCESS;
971}
972
973
974/**
975 * Implements far jumps and calls thru task segments (TSS).
976 *
977 * @param uSel The selector.
978 * @param enmBranch The kind of branching we're performing.
979 * @param enmEffOpSize The effective operand size.
980 * @param pDesc The descriptor corresponding to @a uSel. The type is
981 * task segment (TSS).
982 */
983IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
984{
985#ifndef IEM_IMPLEMENTS_TASKSWITCH
986 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
987#else
988 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
989 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
990 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
991 RT_NOREF_PV(enmEffOpSize);
992
993 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
994 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
995 {
996 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
997 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
998 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
999 }
1000
1001 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1002 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1003 * checked here, need testcases. */
1004 if (!pDesc->Legacy.Gen.u1Present)
1005 {
1006 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1007 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1008 }
1009
1010 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1011 uint32_t uNextEip = pCtx->eip + cbInstr;
1012 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1013 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1014#endif
1015}
1016
1017
1018/**
1019 * Implements far jumps and calls thru task gates.
1020 *
1021 * @param uSel The selector.
1022 * @param enmBranch The kind of branching we're performing.
1023 * @param enmEffOpSize The effective operand size.
1024 * @param pDesc The descriptor corresponding to @a uSel. The type is
1025 * task gate.
1026 */
1027IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1028{
1029#ifndef IEM_IMPLEMENTS_TASKSWITCH
1030 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1031#else
1032 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1033 RT_NOREF_PV(enmEffOpSize);
1034
1035 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1036 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1037 {
1038 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1039 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1040 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1041 }
1042
1043 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1044 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1045 * checked here, need testcases. */
1046 if (!pDesc->Legacy.Gen.u1Present)
1047 {
1048 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1049 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1050 }
1051
1052 /*
1053 * Fetch the new TSS descriptor from the GDT.
1054 */
1055 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1056 if (uSelTss & X86_SEL_LDT)
1057 {
1058 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1059 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1060 }
1061
1062 IEMSELDESC TssDesc;
1063 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1064 if (rcStrict != VINF_SUCCESS)
1065 return rcStrict;
1066
1067 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1068 {
1069 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1070 TssDesc.Legacy.Gate.u4Type));
1071 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1072 }
1073
1074 if (!TssDesc.Legacy.Gate.u1Present)
1075 {
1076 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1077 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1078 }
1079
1080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1081 uint32_t uNextEip = pCtx->eip + cbInstr;
1082 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1083 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1084#endif
1085}
1086
1087
1088/**
1089 * Implements far jumps and calls thru call gates.
1090 *
1091 * @param uSel The selector.
1092 * @param enmBranch The kind of branching we're performing.
1093 * @param enmEffOpSize The effective operand size.
1094 * @param pDesc The descriptor corresponding to @a uSel. The type is
1095 * call gate.
1096 */
1097IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1098{
1099#define IEM_IMPLEMENTS_CALLGATE
1100#ifndef IEM_IMPLEMENTS_CALLGATE
1101 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1102#else
1103 RT_NOREF_PV(enmEffOpSize);
1104
1105 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1106 * inter-privilege calls and are much more complex.
1107 *
1108 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1109 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1110 * must be 16-bit or 32-bit.
1111 */
1112 /** @todo: effective operand size is probably irrelevant here, only the
1113 * call gate bitness matters??
1114 */
1115 VBOXSTRICTRC rcStrict;
1116 RTPTRUNION uPtrRet;
1117 uint64_t uNewRsp;
1118 uint64_t uNewRip;
1119 uint64_t u64Base;
1120 uint32_t cbLimit;
1121 RTSEL uNewCS;
1122 IEMSELDESC DescCS;
1123
1124 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1125 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1126 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1127 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1128
1129 /* Determine the new instruction pointer from the gate descriptor. */
1130 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1131 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1132 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1133
1134 /* Perform DPL checks on the gate descriptor. */
1135 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1136 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1137 {
1138 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1139 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1140 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1141 }
1142
1143 /** @todo does this catch NULL selectors, too? */
1144 if (!pDesc->Legacy.Gen.u1Present)
1145 {
1146 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1147 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1148 }
1149
1150 /*
1151 * Fetch the target CS descriptor from the GDT or LDT.
1152 */
1153 uNewCS = pDesc->Legacy.Gate.u16Sel;
1154 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1155 if (rcStrict != VINF_SUCCESS)
1156 return rcStrict;
1157
1158 /* Target CS must be a code selector. */
1159 if ( !DescCS.Legacy.Gen.u1DescType
1160 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1161 {
1162 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1163 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1164 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1165 }
1166
1167 /* Privilege checks on target CS. */
1168 if (enmBranch == IEMBRANCH_JUMP)
1169 {
1170 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1171 {
1172 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1173 {
1174 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1175 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1176 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1177 }
1178 }
1179 else
1180 {
1181 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1182 {
1183 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1184 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1185 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1186 }
1187 }
1188 }
1189 else
1190 {
1191 Assert(enmBranch == IEMBRANCH_CALL);
1192 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1193 {
1194 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1195 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1196 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1197 }
1198 }
1199
1200 /* Additional long mode checks. */
1201 if (IEM_IS_LONG_MODE(pVCpu))
1202 {
1203 if (!DescCS.Legacy.Gen.u1Long)
1204 {
1205 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1206 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1207 }
1208
1209 /* L vs D. */
1210 if ( DescCS.Legacy.Gen.u1Long
1211 && DescCS.Legacy.Gen.u1DefBig)
1212 {
1213 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1214 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1215 }
1216 }
1217
1218 if (!DescCS.Legacy.Gate.u1Present)
1219 {
1220 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1221 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1222 }
1223
1224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1225
1226 if (enmBranch == IEMBRANCH_JUMP)
1227 {
1228 /** @todo: This is very similar to regular far jumps; merge! */
1229 /* Jumps are fairly simple... */
1230
1231 /* Chop the high bits off if 16-bit gate (Intel says so). */
1232 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1233 uNewRip = (uint16_t)uNewRip;
1234
1235 /* Limit check for non-long segments. */
1236 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1237 if (DescCS.Legacy.Gen.u1Long)
1238 u64Base = 0;
1239 else
1240 {
1241 if (uNewRip > cbLimit)
1242 {
1243 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1244 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1245 }
1246 u64Base = X86DESC_BASE(&DescCS.Legacy);
1247 }
1248
1249 /* Canonical address check. */
1250 if (!IEM_IS_CANONICAL(uNewRip))
1251 {
1252 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1253 return iemRaiseNotCanonical(pVCpu);
1254 }
1255
1256 /*
1257 * Ok, everything checked out fine. Now set the accessed bit before
1258 * committing the result into CS, CSHID and RIP.
1259 */
1260 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1261 {
1262 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1263 if (rcStrict != VINF_SUCCESS)
1264 return rcStrict;
1265 /** @todo check what VT-x and AMD-V does. */
1266 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1267 }
1268
1269 /* commit */
1270 pCtx->rip = uNewRip;
1271 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1272 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1273 pCtx->cs.ValidSel = pCtx->cs.Sel;
1274 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1275 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1276 pCtx->cs.u32Limit = cbLimit;
1277 pCtx->cs.u64Base = u64Base;
1278 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1279 }
1280 else
1281 {
1282 Assert(enmBranch == IEMBRANCH_CALL);
1283 /* Calls are much more complicated. */
1284
1285 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1286 {
1287 uint16_t offNewStack; /* Offset of new stack in TSS. */
1288 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1289 uint8_t uNewCSDpl;
1290 uint8_t cbWords;
1291 RTSEL uNewSS;
1292 RTSEL uOldSS;
1293 uint64_t uOldRsp;
1294 IEMSELDESC DescSS;
1295 RTPTRUNION uPtrTSS;
1296 RTGCPTR GCPtrTSS;
1297 RTPTRUNION uPtrParmWds;
1298 RTGCPTR GCPtrParmWds;
1299
1300 /* More privilege. This is the fun part. */
1301 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1302
1303 /*
1304 * Determine new SS:rSP from the TSS.
1305 */
1306 Assert(!pCtx->tr.Attr.n.u1DescType);
1307
1308 /* Figure out where the new stack pointer is stored in the TSS. */
1309 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1310 if (!IEM_IS_LONG_MODE(pVCpu))
1311 {
1312 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1313 {
1314 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1315 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1316 }
1317 else
1318 {
1319 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1320 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1321 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1322 }
1323 }
1324 else
1325 {
1326 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
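 /* The 64-bit TSS keeps rsp0/rsp1/rsp2 as consecutive 8-byte fields, so the target
  * DPL can index them directly; no SS selector is stored in the TSS in long mode. */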
1327 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1328 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1329 }
1330
1331 /* Check against TSS limit. */
1332 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1333 {
1334 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1335 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pCtx->tr.Sel);
1336 }
1337
1338 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1339 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1340 if (rcStrict != VINF_SUCCESS)
1341 {
1342 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1343 return rcStrict;
1344 }
1345
1346 if (!IEM_IS_LONG_MODE(pVCpu))
1347 {
1348 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1349 {
1350 uNewRsp = uPtrTSS.pu32[0];
1351 uNewSS = uPtrTSS.pu16[2];
1352 }
1353 else
1354 {
1355 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1356 uNewRsp = uPtrTSS.pu16[0];
1357 uNewSS = uPtrTSS.pu16[1];
1358 }
1359 }
1360 else
1361 {
1362 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1363 /* SS will be a NULL selector, but that's valid. */
1364 uNewRsp = uPtrTSS.pu64[0];
1365 uNewSS = uNewCSDpl;
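 /* In long mode the new SS is a NULL selector whose RPL equals the new CPL, which
  * is exactly the value uNewCSDpl supplies here. */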
1366 }
1367
1368 /* Done with the TSS now. */
1369 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1370 if (rcStrict != VINF_SUCCESS)
1371 {
1372 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1373 return rcStrict;
1374 }
1375
1376 /* Only used outside of long mode. */
1377 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1378
1379 /* If EFER.LMA is 0, there's extra work to do. */
1380 if (!IEM_IS_LONG_MODE(pVCpu))
1381 {
1382 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1383 {
1384 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1385 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1386 }
1387
1388 /* Grab the new SS descriptor. */
1389 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1390 if (rcStrict != VINF_SUCCESS)
1391 return rcStrict;
1392
1393 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1394 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1395 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1396 {
1397 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1398 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1399 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1400 }
1401
1402 /* Ensure new SS is a writable data segment. */
1403 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1404 {
1405 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1406 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1407 }
1408
1409 if (!DescSS.Legacy.Gen.u1Present)
1410 {
1411 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1412 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1413 }
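 /* The four fixed slots in the new frame are the old SS, old (e)SP, old CS and old
  * (e)IP; in addition, cbWords parameter (d)words are copied over from the caller's
  * stack further down. */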
1414 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1415 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1416 else
1417 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1418 }
1419 else
1420 {
1421 /* Just grab the new (NULL) SS descriptor. */
1422 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1423 * like we do... */
1424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1425 if (rcStrict != VINF_SUCCESS)
1426 return rcStrict;
1427
1428 cbNewStack = sizeof(uint64_t) * 4;
1429 }
1430
1431 /** @todo: According to Intel, new stack is checked for enough space first,
1432 * then switched. According to AMD, the stack is switched first and
1433 * then pushes might fault!
1434 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1435 * incoming stack #PF happens before actual stack switch. AMD is
1436 * either lying or implicitly assumes that new state is committed
1437 * only if and when an instruction doesn't fault.
1438 */
1439
1440 /** @todo: According to AMD, CS is loaded first, then SS.
1441 * According to Intel, it's the other way around!?
1442 */
1443
1444 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1445
1446 /* Set the accessed bit before committing new SS. */
1447 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1448 {
1449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1450 if (rcStrict != VINF_SUCCESS)
1451 return rcStrict;
1452 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1453 }
1454
1455 /* Remember the old SS:rSP and their linear address. */
1456 uOldSS = pCtx->ss.Sel;
1457 uOldRsp = pCtx->ss.Attr.n.u1DefBig ? pCtx->rsp : pCtx->sp;
1458
1459 GCPtrParmWds = pCtx->ss.u64Base + uOldRsp;
1460
1461 /* HACK ALERT! Probe whether the write to the new stack will succeed. This may raise #SS(NewSS)
1462 or #PF; raising the former is not implemented in this workaround. */
1463 /** @todo Properly fix call gate target stack exceptions. */
1464 /** @todo testcase: Cover callgates with partially or fully inaccessible
1465 * target stacks. */
1466 void *pvNewFrame;
1467 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1468 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW);
1469 if (rcStrict != VINF_SUCCESS)
1470 {
1471 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1472 return rcStrict;
1473 }
1474 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Commit new SS:rSP. */
1482 pCtx->ss.Sel = uNewSS;
1483 pCtx->ss.ValidSel = uNewSS;
1484 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1485 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1486 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1487 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1488 pCtx->rsp = uNewRsp;
1489 pVCpu->iem.s.uCpl = uNewCSDpl;
1490 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1491 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1492
1493 /* At this point the stack access must not fail because new state was already committed. */
1494 /** @todo this can still fail due to SS.LIMIT not being checked. */
1495 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1496 &uPtrRet.pv, &uNewRsp);
1497 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1498 VERR_INTERNAL_ERROR_5);
1499
1500 if (!IEM_IS_LONG_MODE(pVCpu))
1501 {
1502 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1503 {
1504 /* Push the old CS:rIP. */
1505 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1506 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1507
1508 if (cbWords)
1509 {
1510 /* Map the relevant chunk of the old stack. */
1511 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1512 if (rcStrict != VINF_SUCCESS)
1513 {
1514 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1515 return rcStrict;
1516 }
1517
1518 /* Copy the parameter (d)words. */
1519 for (int i = 0; i < cbWords; ++i)
1520 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1521
1522 /* Unmap the old stack. */
1523 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1524 if (rcStrict != VINF_SUCCESS)
1525 {
1526 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1527 return rcStrict;
1528 }
1529 }
1530
1531 /* Push the old SS:rSP. */
1532 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1533 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1534 }
1535 else
1536 {
1537 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1538
1539 /* Push the old CS:rIP. */
1540 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1541 uPtrRet.pu16[1] = pCtx->cs.Sel;
1542
1543 if (cbWords)
1544 {
1545 /* Map the relevant chunk of the old stack. */
1546 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1547 if (rcStrict != VINF_SUCCESS)
1548 {
1549 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1550 return rcStrict;
1551 }
1552
1553 /* Copy the parameter words. */
1554 for (int i = 0; i < cbWords; ++i)
1555 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1556
1557 /* Unmap the old stack. */
1558 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1559 if (rcStrict != VINF_SUCCESS)
1560 {
1561 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1562 return rcStrict;
1563 }
1564 }
1565
1566 /* Push the old SS:rSP. */
1567 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1568 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1569 }
1570 }
1571 else
1572 {
1573 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1574
1575 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1576 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1577 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1578 uPtrRet.pu64[2] = uOldRsp;
1579 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1580 }
1581
1582 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1583 if (rcStrict != VINF_SUCCESS)
1584 {
1585 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1586 return rcStrict;
1587 }
1588
1589 /* Chop the high bits off if 16-bit gate (Intel says so). */
1590 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1591 uNewRip = (uint16_t)uNewRip;
1592
1593 /* Limit / canonical check. */
1594 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1595 if (!IEM_IS_LONG_MODE(pVCpu))
1596 {
1597 if (uNewRip > cbLimit)
1598 {
1599 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1600 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1601 }
1602 u64Base = X86DESC_BASE(&DescCS.Legacy);
1603 }
1604 else
1605 {
1606 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1607 if (!IEM_IS_CANONICAL(uNewRip))
1608 {
1609 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1610 return iemRaiseNotCanonical(pVCpu);
1611 }
1612 u64Base = 0;
1613 }
1614
1615 /*
1616 * Now set the accessed bit before
1617 * writing the return address to the stack and committing the result into
1618 * CS, CSHID and RIP.
1619 */
1620 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1621 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1622 {
1623 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1624 if (rcStrict != VINF_SUCCESS)
1625 return rcStrict;
1626 /** @todo check what VT-x and AMD-V does. */
1627 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1628 }
1629
1630 /* Commit new CS:rIP. */
1631 pCtx->rip = uNewRip;
1632 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1633 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1634 pCtx->cs.ValidSel = pCtx->cs.Sel;
1635 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1636 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1637 pCtx->cs.u32Limit = cbLimit;
1638 pCtx->cs.u64Base = u64Base;
1639 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1640 }
1641 else
1642 {
1643 /* Same privilege. */
1644 /** @todo: This is very similar to regular far calls; merge! */
1645
1646 /* Check stack first - may #SS(0). */
1647 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1648 * 16-bit code cause a two or four byte CS to be pushed? */
1649 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1650 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1651 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1652 &uPtrRet.pv, &uNewRsp);
1653 if (rcStrict != VINF_SUCCESS)
1654 return rcStrict;
1655
1656 /* Chop the high bits off if 16-bit gate (Intel says so). */
1657 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1658 uNewRip = (uint16_t)uNewRip;
1659
1660 /* Limit / canonical check. */
1661 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1662 if (!IEM_IS_LONG_MODE(pVCpu))
1663 {
1664 if (uNewRip > cbLimit)
1665 {
1666 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1667 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1668 }
1669 u64Base = X86DESC_BASE(&DescCS.Legacy);
1670 }
1671 else
1672 {
1673 if (!IEM_IS_CANONICAL(uNewRip))
1674 {
1675 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1676 return iemRaiseNotCanonical(pVCpu);
1677 }
1678 u64Base = 0;
1679 }
1680
1681 /*
1682 * Now set the accessed bit before
1683 * writing the return address to the stack and committing the result into
1684 * CS, CSHID and RIP.
1685 */
1686 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1687 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1688 {
1689 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692 /** @todo check what VT-x and AMD-V does. */
1693 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1694 }
1695
1696 /* stack */
1697 if (!IEM_IS_LONG_MODE(pVCpu))
1698 {
1699 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1700 {
1701 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1702 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1703 }
1704 else
1705 {
1706 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1707 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1708 uPtrRet.pu16[1] = pCtx->cs.Sel;
1709 }
1710 }
1711 else
1712 {
1713 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1714 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1715 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1716 }
1717
1718 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1719 if (rcStrict != VINF_SUCCESS)
1720 return rcStrict;
1721
1722 /* commit */
1723 pCtx->rip = uNewRip;
1724 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1725 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1726 pCtx->cs.ValidSel = pCtx->cs.Sel;
1727 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1728 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1729 pCtx->cs.u32Limit = cbLimit;
1730 pCtx->cs.u64Base = u64Base;
1731 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1732 }
1733 }
1734 pCtx->eflags.Bits.u1RF = 0;
1735
1736 /* Flush the prefetch buffer. */
1737# ifdef IEM_WITH_CODE_TLB
1738 pVCpu->iem.s.pbInstrBuf = NULL;
1739# else
1740 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1741# endif
1742 return VINF_SUCCESS;
1743#endif
1744}
1745
1746
1747/**
1748 * Implements far jumps and calls thru system selectors.
1749 *
1750 * @param uSel The selector.
1751 * @param enmBranch The kind of branching we're performing.
1752 * @param enmEffOpSize The effective operand size.
1753 * @param pDesc The descriptor corresponding to @a uSel.
1754 */
1755IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1756{
1757 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1758 Assert((uSel & X86_SEL_MASK_OFF_RPL));
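 /* Summary of the dispatch below: in long mode only 64-bit call gates are
    legal far JMP/CALL targets; in legacy protected mode call gates, task
    gates and *available* 286/386 TSS descriptors are accepted, while busy
    TSSes, LDTs and interrupt/trap gates all raise #GP(selector). */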
1759
1760 if (IEM_IS_LONG_MODE(pVCpu))
1761 switch (pDesc->Legacy.Gen.u4Type)
1762 {
1763 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1764 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1765
1766 default:
1767 case AMD64_SEL_TYPE_SYS_LDT:
1768 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1769 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1770 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1771 case AMD64_SEL_TYPE_SYS_INT_GATE:
1772 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1773 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1774 }
1775
1776 switch (pDesc->Legacy.Gen.u4Type)
1777 {
1778 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1779 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1780 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1781
1782 case X86_SEL_TYPE_SYS_TASK_GATE:
1783 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1784
1785 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1786 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1787 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1788
1789 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1790 Log(("branch %04x -> busy 286 TSS\n", uSel));
1791 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1792
1793 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1794 Log(("branch %04x -> busy 386 TSS\n", uSel));
1795 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1796
1797 default:
1798 case X86_SEL_TYPE_SYS_LDT:
1799 case X86_SEL_TYPE_SYS_286_INT_GATE:
1800 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1801 case X86_SEL_TYPE_SYS_386_INT_GATE:
1802 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1803 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1804 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1805 }
1806}
1807
1808
1809/**
1810 * Implements far jumps.
1811 *
1812 * @param uSel The selector.
1813 * @param offSeg The segment offset.
1814 * @param enmEffOpSize The effective operand size.
1815 */
1816IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1817{
1818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1819 NOREF(cbInstr);
1820 Assert(offSeg <= UINT32_MAX);
1821
1822 /*
1823 * Real mode and V8086 mode are easy. The only snag seems to be that
1824 * CS.limit doesn't change and the limit check is done against the current
1825 * limit.
1826 */
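 /* Illustrative example: in real mode a 'jmp far 2000h:1234h' ends up with
    CS=0x2000, CS.base=0x20000 (selector << 4) and EIP=0x1234, while
    CS.limit keeps whatever value it had before the jump. */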
1827 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
1828 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
1829 {
1830 if (offSeg > pCtx->cs.u32Limit)
1831 {
1832 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1833 return iemRaiseGeneralProtectionFault0(pVCpu);
1834 }
1835
1836 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1837 pCtx->rip = offSeg;
1838 else
1839 pCtx->rip = offSeg & UINT16_MAX;
1840 pCtx->cs.Sel = uSel;
1841 pCtx->cs.ValidSel = uSel;
1842 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1843 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1844 pCtx->eflags.Bits.u1RF = 0;
1845 return VINF_SUCCESS;
1846 }
1847
1848 /*
1849 * Protected mode. Need to parse the specified descriptor...
1850 */
1851 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1852 {
1853 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1854 return iemRaiseGeneralProtectionFault0(pVCpu);
1855 }
1856
1857 /* Fetch the descriptor. */
1858 IEMSELDESC Desc;
1859 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1860 if (rcStrict != VINF_SUCCESS)
1861 return rcStrict;
1862
1863 /* Is it there? */
1864 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1865 {
1866 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1867 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1868 }
1869
1870 /*
1871 * Deal with it according to its type. We do the standard code selectors
1872 * here and dispatch the system selectors to worker functions.
1873 */
1874 if (!Desc.Legacy.Gen.u1DescType)
1875 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1876
1877 /* Only code segments. */
1878 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1879 {
1880 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1881 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1882 }
1883
1884 /* L vs D. */
1885 if ( Desc.Legacy.Gen.u1Long
1886 && Desc.Legacy.Gen.u1DefBig
1887 && IEM_IS_LONG_MODE(pVCpu))
1888 {
1889 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1890 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1891 }
1892
1893 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1894 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1895 {
1896 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1897 {
1898 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1899 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1900 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1901 }
1902 }
1903 else
1904 {
1905 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1906 {
1907 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1908 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1909 }
1910 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1911 {
1912 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1913 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1914 }
1915 }
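 /* In short (a summary of the checks above): a conforming code segment only
    requires DPL <= CPL, so CPL=3 code may jump to a conforming DPL=0
    segment; a non-conforming segment requires DPL == CPL and RPL <= CPL,
    so the same jump to a non-conforming DPL=0 segment raises #GP. */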
1916
1917 /* Chop the high bits if 16-bit (Intel says so). */
1918 if (enmEffOpSize == IEMMODE_16BIT)
1919 offSeg &= UINT16_MAX;
1920
1921 /* Limit check. (Should alternatively check for non-canonical addresses
1922 here, but that is ruled out by offSeg being 32-bit, right?) */
1923 uint64_t u64Base;
1924 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1925 if (Desc.Legacy.Gen.u1Long)
1926 u64Base = 0;
1927 else
1928 {
1929 if (offSeg > cbLimit)
1930 {
1931 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1932 /** @todo: Intel says this is #GP(0)! */
1933 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1934 }
1935 u64Base = X86DESC_BASE(&Desc.Legacy);
1936 }
1937
1938 /*
1939 * Ok, everything checked out fine. Now set the accessed bit before
1940 * committing the result into CS, CSHID and RIP.
1941 */
1942 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1943 {
1944 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1945 if (rcStrict != VINF_SUCCESS)
1946 return rcStrict;
1947 /** @todo check what VT-x and AMD-V does. */
1948 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1949 }
1950
1951 /* commit */
1952 pCtx->rip = offSeg;
1953 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1954 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1955 pCtx->cs.ValidSel = pCtx->cs.Sel;
1956 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1957 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1958 pCtx->cs.u32Limit = cbLimit;
1959 pCtx->cs.u64Base = u64Base;
1960 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1961 pCtx->eflags.Bits.u1RF = 0;
1962 /** @todo check if the hidden bits are loaded correctly for 64-bit
1963 * mode. */
1964
1965 /* Flush the prefetch buffer. */
1966#ifdef IEM_WITH_CODE_TLB
1967 pVCpu->iem.s.pbInstrBuf = NULL;
1968#else
1969 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1970#endif
1971
1972 return VINF_SUCCESS;
1973}
1974
1975
1976/**
1977 * Implements far calls.
1978 *
1979 * This is very similar to iemCImpl_FarJmp.
1980 *
1981 * @param uSel The selector.
1982 * @param offSeg The segment offset.
1983 * @param enmEffOpSize The operand size (in case we need it).
1984 */
1985IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1986{
1987 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1988 VBOXSTRICTRC rcStrict;
1989 uint64_t uNewRsp;
1990 RTPTRUNION uPtrRet;
1991
1992 /*
1993 * Real mode and V8086 mode are easy. The only snag seems to be that
1994 * CS.limit doesn't change and the limit check is done against the current
1995 * limit.
1996 */
1997 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
1998 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
1999 {
2000 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2001
2002 /* Check stack first - may #SS(0). */
2003 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
2004 &uPtrRet.pv, &uNewRsp);
2005 if (rcStrict != VINF_SUCCESS)
2006 return rcStrict;
2007
2008 /* Check the target address range. */
2009 if (offSeg > UINT32_MAX)
2010 return iemRaiseGeneralProtectionFault0(pVCpu);
2011
2012 /* Everything is fine, push the return address. */
2013 if (enmEffOpSize == IEMMODE_16BIT)
2014 {
2015 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2016 uPtrRet.pu16[1] = pCtx->cs.Sel;
2017 }
2018 else
2019 {
2020 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2021 uPtrRet.pu16[2] = pCtx->cs.Sel; /* CS word lands at byte offset 4 of the 6-byte frame. */
2022 }
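 /* Frame layout as modeled here, low to high address: 16-bit operand size
    pushes 4 bytes, [IP][CS]; 32-bit operand size pushes a 6-byte frame,
    [EIP dword][CS word]. What real CPUs put into any CS padding bytes is
    still an open testcase question elsewhere in this file. */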
2023 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2024 if (rcStrict != VINF_SUCCESS)
2025 return rcStrict;
2026
2027 /* Branch. */
2028 pCtx->rip = offSeg;
2029 pCtx->cs.Sel = uSel;
2030 pCtx->cs.ValidSel = uSel;
2031 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2032 pCtx->cs.u64Base = (uint32_t)uSel << 4;
2033 pCtx->eflags.Bits.u1RF = 0;
2034 return VINF_SUCCESS;
2035 }
2036
2037 /*
2038 * Protected mode. Need to parse the specified descriptor...
2039 */
2040 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2041 {
2042 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2043 return iemRaiseGeneralProtectionFault0(pVCpu);
2044 }
2045
2046 /* Fetch the descriptor. */
2047 IEMSELDESC Desc;
2048 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2049 if (rcStrict != VINF_SUCCESS)
2050 return rcStrict;
2051
2052 /*
2053 * Deal with it according to its type. We do the standard code selectors
2054 * here and dispatch the system selectors to worker functions.
2055 */
2056 if (!Desc.Legacy.Gen.u1DescType)
2057 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2058
2059 /* Only code segments. */
2060 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2061 {
2062 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2063 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2064 }
2065
2066 /* L vs D. */
2067 if ( Desc.Legacy.Gen.u1Long
2068 && Desc.Legacy.Gen.u1DefBig
2069 && IEM_IS_LONG_MODE(pVCpu))
2070 {
2071 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2072 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2073 }
2074
2075 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
2076 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2077 {
2078 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2079 {
2080 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2081 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2082 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2083 }
2084 }
2085 else
2086 {
2087 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2088 {
2089 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2090 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2091 }
2092 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2093 {
2094 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2095 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2096 }
2097 }
2098
2099 /* Is it there? */
2100 if (!Desc.Legacy.Gen.u1Present)
2101 {
2102 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2103 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2104 }
2105
2106 /* Check stack first - may #SS(0). */
2107 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2108 * 16-bit code cause a two or four byte CS to be pushed? */
2109 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2110 enmEffOpSize == IEMMODE_64BIT ? 8+8
2111 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2112 &uPtrRet.pv, &uNewRsp);
2113 if (rcStrict != VINF_SUCCESS)
2114 return rcStrict;
2115
2116 /* Chop the high bits if 16-bit (Intel says so). */
2117 if (enmEffOpSize == IEMMODE_16BIT)
2118 offSeg &= UINT16_MAX;
2119
2120 /* Limit / canonical check. */
2121 uint64_t u64Base;
2122 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2123 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2124 {
2125 if (!IEM_IS_CANONICAL(offSeg))
2126 {
2127 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2128 return iemRaiseNotCanonical(pVCpu);
2129 }
2130 u64Base = 0;
2131 }
2132 else
2133 {
2134 if (offSeg > cbLimit)
2135 {
2136 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2137 /** @todo: Intel says this is #GP(0)! */
2138 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2139 }
2140 u64Base = X86DESC_BASE(&Desc.Legacy);
2141 }
2142
2143 /*
2144 * Now set the accessed bit before
2145 * writing the return address to the stack and committing the result into
2146 * CS, CSHID and RIP.
2147 */
2148 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2149 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2150 {
2151 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2152 if (rcStrict != VINF_SUCCESS)
2153 return rcStrict;
2154 /** @todo check what VT-x and AMD-V does. */
2155 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2156 }
2157
2158 /* stack */
2159 if (enmEffOpSize == IEMMODE_16BIT)
2160 {
2161 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2162 uPtrRet.pu16[1] = pCtx->cs.Sel;
2163 }
2164 else if (enmEffOpSize == IEMMODE_32BIT)
2165 {
2166 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2167 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2168 }
2169 else
2170 {
2171 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2172 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2173 }
2174 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2175 if (rcStrict != VINF_SUCCESS)
2176 return rcStrict;
2177
2178 /* commit */
2179 pCtx->rip = offSeg;
2180 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2181 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
2182 pCtx->cs.ValidSel = pCtx->cs.Sel;
2183 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2184 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2185 pCtx->cs.u32Limit = cbLimit;
2186 pCtx->cs.u64Base = u64Base;
2187 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2188 pCtx->eflags.Bits.u1RF = 0;
2189 /** @todo check if the hidden bits are loaded correctly for 64-bit
2190 * mode. */
2191
2192 /* Flush the prefetch buffer. */
2193#ifdef IEM_WITH_CODE_TLB
2194 pVCpu->iem.s.pbInstrBuf = NULL;
2195#else
2196 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2197#endif
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Implements retf.
2204 *
2205 * @param enmEffOpSize The effective operand size.
2206 * @param cbPop The number of bytes of arguments to pop from
2207 * the stack.
2208 */
2209IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2210{
2211 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2212 VBOXSTRICTRC rcStrict;
2213 RTCPTRUNION uPtrFrame;
2214 uint64_t uNewRsp;
2215 uint64_t uNewRip;
2216 uint16_t uNewCs;
2217 NOREF(cbInstr);
2218
2219 /*
2220 * Read the stack values first.
2221 */
2222 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2223 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
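 /* The return frame read below, low to high address:
        16-bit: [IP word][CS word] (4 bytes)
        32-bit: [EIP dword][CS word + 2 padding bytes] (8 bytes)
        64-bit: [RIP qword][CS word + 6 padding bytes] (16 bytes) */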
2224 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2225 if (rcStrict != VINF_SUCCESS)
2226 return rcStrict;
2227 if (enmEffOpSize == IEMMODE_16BIT)
2228 {
2229 uNewRip = uPtrFrame.pu16[0];
2230 uNewCs = uPtrFrame.pu16[1];
2231 }
2232 else if (enmEffOpSize == IEMMODE_32BIT)
2233 {
2234 uNewRip = uPtrFrame.pu32[0];
2235 uNewCs = uPtrFrame.pu16[2];
2236 }
2237 else
2238 {
2239 uNewRip = uPtrFrame.pu64[0];
2240 uNewCs = uPtrFrame.pu16[4];
2241 }
2242 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2243 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2244 { /* extremely likely */ }
2245 else
2246 return rcStrict;
2247
2248 /*
2249 * Real mode and V8086 mode are easy.
2250 */
2251 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
2252 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
2253 {
2254 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2255 /** @todo check how this is supposed to work if sp=0xfffe. */
2256
2257 /* Check the limit of the new EIP. */
2258 /** @todo Intel pseudo code only does the limit check for 16-bit
2259 * operands; AMD does not make any distinction. What is right? */
2260 if (uNewRip > pCtx->cs.u32Limit)
2261 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2262
2263 /* commit the operation. */
2264 pCtx->rsp = uNewRsp;
2265 pCtx->rip = uNewRip;
2266 pCtx->cs.Sel = uNewCs;
2267 pCtx->cs.ValidSel = uNewCs;
2268 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2269 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2270 pCtx->eflags.Bits.u1RF = 0;
2271 /** @todo do we load attribs and limit as well? */
2272 if (cbPop)
2273 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2274 return VINF_SUCCESS;
2275 }
2276
2277 /*
2278 * Protected mode is complicated, of course.
2279 */
2280 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2281 {
2282 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2283 return iemRaiseGeneralProtectionFault0(pVCpu);
2284 }
2285
2286 /* Fetch the descriptor. */
2287 IEMSELDESC DescCs;
2288 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2289 if (rcStrict != VINF_SUCCESS)
2290 return rcStrict;
2291
2292 /* Can only return to a code selector. */
2293 if ( !DescCs.Legacy.Gen.u1DescType
2294 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2295 {
2296 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2297 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2298 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2299 }
2300
2301 /* L vs D. */
2302 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2303 && DescCs.Legacy.Gen.u1DefBig
2304 && IEM_IS_LONG_MODE(pVCpu))
2305 {
2306 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2307 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2308 }
2309
2310 /* DPL/RPL/CPL checks. */
2311 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2312 {
2313 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2314 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2315 }
2316
2317 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2318 {
2319 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2320 {
2321 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2322 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2323 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2324 }
2325 }
2326 else
2327 {
2328 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2329 {
2330 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2331 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2332 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2333 }
2334 }
2335
2336 /* Is it there? */
2337 if (!DescCs.Legacy.Gen.u1Present)
2338 {
2339 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2340 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2341 }
2342
2343 /*
2344 * Return to outer privilege? (We'll typically have entered via a call gate.)
2345 */
2346 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2347 {
2348 /* Read the outer stack pointer stored *after* the parameters. */
2349 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop + cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2350 if (rcStrict != VINF_SUCCESS)
2351 return rcStrict;
2352
2353 uPtrFrame.pu8 += cbPop; /* Skip the parameters. */
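 /* For a return to an outer privilege level the full frame is, low to high:
        [return IP/EIP/RIP][CS][cbPop bytes of parameters][outer SP/ESP/RSP][outer SS]
    hence continuing the pop past the return address and skipping cbPop
    bytes of parameters to reach the outer SS:rSP. */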
2354
2355 uint16_t uNewOuterSs;
2356 uint64_t uNewOuterRsp;
2357 if (enmEffOpSize == IEMMODE_16BIT)
2358 {
2359 uNewOuterRsp = uPtrFrame.pu16[0];
2360 uNewOuterSs = uPtrFrame.pu16[1];
2361 }
2362 else if (enmEffOpSize == IEMMODE_32BIT)
2363 {
2364 uNewOuterRsp = uPtrFrame.pu32[0];
2365 uNewOuterSs = uPtrFrame.pu16[2];
2366 }
2367 else
2368 {
2369 uNewOuterRsp = uPtrFrame.pu64[0];
2370 uNewOuterSs = uPtrFrame.pu16[4];
2371 }
2372 uPtrFrame.pu8 -= cbPop; /* Put uPtrFrame back the way it was. */
2373 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2374 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2375 { /* extremely likely */ }
2376 else
2377 return rcStrict;
2378
2379 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2380 and read the selector. */
2381 IEMSELDESC DescSs;
2382 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2383 {
2384 if ( !DescCs.Legacy.Gen.u1Long
2385 || (uNewOuterSs & X86_SEL_RPL) == 3)
2386 {
2387 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2388 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2389 return iemRaiseGeneralProtectionFault0(pVCpu);
2390 }
2391 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2392 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2393 }
2394 else
2395 {
2396 /* Fetch the descriptor for the new stack segment. */
2397 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2398 if (rcStrict != VINF_SUCCESS)
2399 return rcStrict;
2400 }
2401
2402 /* Check that RPL of stack and code selectors match. */
2403 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2404 {
2405 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2406 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2407 }
2408
2409 /* Must be a writable data segment. */
2410 if ( !DescSs.Legacy.Gen.u1DescType
2411 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2412 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2413 {
2414 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2415 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2416 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2417 }
2418
2419 /* L vs D. (Not mentioned by intel.) */
2420 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2421 && DescSs.Legacy.Gen.u1DefBig
2422 && IEM_IS_LONG_MODE(pVCpu))
2423 {
2424 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2425 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2426 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2427 }
2428
2429 /* DPL/RPL/CPL checks. */
2430 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2431 {
2432 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2433 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2434 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2435 }
2436
2437 /* Is it there? */
2438 if (!DescSs.Legacy.Gen.u1Present)
2439 {
2440 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2441 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewOuterSs);
2442 }
2443
2444 /* Calc SS limit. */
2445 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2446
2447 /* Is RIP canonical or within CS.limit? */
2448 uint64_t u64Base;
2449 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2450
2451 /** @todo Testcase: Is this correct? */
2452 if ( DescCs.Legacy.Gen.u1Long
2453 && IEM_IS_LONG_MODE(pVCpu) )
2454 {
2455 if (!IEM_IS_CANONICAL(uNewRip))
2456 {
2457 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2458 return iemRaiseNotCanonical(pVCpu);
2459 }
2460 u64Base = 0;
2461 }
2462 else
2463 {
2464 if (uNewRip > cbLimitCs)
2465 {
2466 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2467 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2468 /** @todo: Intel says this is #GP(0)! */
2469 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2470 }
2471 u64Base = X86DESC_BASE(&DescCs.Legacy);
2472 }
2473
2474 /*
2475 * Now set the accessed bits on the CS and SS descriptors before
2476 * committing the result into CS, CSHID, SS and RIP (a far return
2477 * only reads the stack, nothing is written back to it).
2478 */
2479 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2480 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2481 {
2482 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2483 if (rcStrict != VINF_SUCCESS)
2484 return rcStrict;
2485 /** @todo check what VT-x and AMD-V does. */
2486 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2487 }
2488 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2489 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2490 {
2491 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2492 if (rcStrict != VINF_SUCCESS)
2493 return rcStrict;
2494 /** @todo check what VT-x and AMD-V does. */
2495 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2496 }
2497
2498 /* commit */
2499 if (enmEffOpSize == IEMMODE_16BIT)
2500 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2501 else
2502 pCtx->rip = uNewRip;
2503 pCtx->cs.Sel = uNewCs;
2504 pCtx->cs.ValidSel = uNewCs;
2505 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2506 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2507 pCtx->cs.u32Limit = cbLimitCs;
2508 pCtx->cs.u64Base = u64Base;
2509 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2510 pCtx->ss.Sel = uNewOuterSs;
2511 pCtx->ss.ValidSel = uNewOuterSs;
2512 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2513 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2514 pCtx->ss.u32Limit = cbLimitSs;
2515 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2516 pCtx->ss.u64Base = 0;
2517 else
2518 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2519 if (!pCtx->ss.Attr.n.u1DefBig)
2520 pCtx->sp = (uint16_t)uNewOuterRsp;
2521 else
2522 pCtx->rsp = uNewOuterRsp;
2523
2524 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2525 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2526 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2527 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2528 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2529
2530 /** @todo check if the hidden bits are loaded correctly for 64-bit
2531 * mode. */
2532
2533 if (cbPop)
2534 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2535 pCtx->eflags.Bits.u1RF = 0;
2536
2537 /* Done! */
2538 }
2539 /*
2540 * Return to the same privilege level
2541 */
2542 else
2543 {
2544 /* Limit / canonical check. */
2545 uint64_t u64Base;
2546 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2547
2548 /** @todo Testcase: Is this correct? */
2549 if ( DescCs.Legacy.Gen.u1Long
2550 && IEM_IS_LONG_MODE(pVCpu) )
2551 {
2552 if (!IEM_IS_CANONICAL(uNewRip))
2553 {
2554 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2555 return iemRaiseNotCanonical(pVCpu);
2556 }
2557 u64Base = 0;
2558 }
2559 else
2560 {
2561 if (uNewRip > cbLimitCs)
2562 {
2563 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2564 /** @todo: Intel says this is #GP(0)! */
2565 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2566 }
2567 u64Base = X86DESC_BASE(&DescCs.Legacy);
2568 }
2569
2570 /*
2571 * Now set the accessed bit before committing the result into
2572 * CS, CSHID and RIP (a far return only reads the stack, nothing
2573 * is written back to it).
2574 */
2575 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2576 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2577 {
2578 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2579 if (rcStrict != VINF_SUCCESS)
2580 return rcStrict;
2581 /** @todo check what VT-x and AMD-V does. */
2582 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2583 }
2584
2585 /* commit */
2586 if (!pCtx->ss.Attr.n.u1DefBig)
2587 pCtx->sp = (uint16_t)uNewRsp;
2588 else
2589 pCtx->rsp = uNewRsp;
2590 if (enmEffOpSize == IEMMODE_16BIT)
2591 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2592 else
2593 pCtx->rip = uNewRip;
2594 pCtx->cs.Sel = uNewCs;
2595 pCtx->cs.ValidSel = uNewCs;
2596 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2597 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2598 pCtx->cs.u32Limit = cbLimitCs;
2599 pCtx->cs.u64Base = u64Base;
2600 /** @todo check if the hidden bits are loaded correctly for 64-bit
2601 * mode. */
2602 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2603 if (cbPop)
2604 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2605 pCtx->eflags.Bits.u1RF = 0;
2606 }
2607
2608 /* Flush the prefetch buffer. */
2609#ifdef IEM_WITH_CODE_TLB
2610 pVCpu->iem.s.pbInstrBuf = NULL;
2611#else
2612 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2613#endif
2614 return VINF_SUCCESS;
2615}
2616
2617
2618/**
2619 * Implements retn.
2620 *
2621 * We're doing this in C because of the \#GP that might be raised if the popped
2622 * program counter is out of bounds.
2623 *
2624 * @param enmEffOpSize The effective operand size.
2625 * @param cbPop The number of bytes of arguments to pop from
2626 * the stack.
2627 */
2628IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2629{
2630 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2631 NOREF(cbInstr);
2632
2633 /* Fetch the RSP from the stack. */
2634 VBOXSTRICTRC rcStrict;
2635 RTUINT64U NewRip;
2636 RTUINT64U NewRsp;
2637 NewRsp.u = pCtx->rsp;
2638
2639 switch (enmEffOpSize)
2640 {
2641 case IEMMODE_16BIT:
2642 NewRip.u = 0;
2643 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2644 break;
2645 case IEMMODE_32BIT:
2646 NewRip.u = 0;
2647 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2648 break;
2649 case IEMMODE_64BIT:
2650 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2651 break;
2652 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2653 }
2654 if (rcStrict != VINF_SUCCESS)
2655 return rcStrict;
2656
2657 /* Check the new RSP before loading it. */
2658 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2659 * of it. The canonical test is performed here and for call. */
2660 if (enmEffOpSize != IEMMODE_64BIT)
2661 {
2662 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2663 {
2664 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2665 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2666 }
2667 }
2668 else
2669 {
2670 if (!IEM_IS_CANONICAL(NewRip.u))
2671 {
2672 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2673 return iemRaiseNotCanonical(pVCpu);
2674 }
2675 }
2676
2677 /* Apply cbPop */
2678 if (cbPop)
2679 iemRegAddToRspEx(pVCpu, pCtx, &NewRsp, cbPop);
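    /* Illustrative example: 'ret 8' in 64-bit code pops the qword return
       address and then adds 8 to RSP, discarding 8 bytes of arguments the
       caller pushed. */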
2680
2681 /* Commit it. */
2682 pCtx->rip = NewRip.u;
2683 pCtx->rsp = NewRsp.u;
2684 pCtx->eflags.Bits.u1RF = 0;
2685
2686 /* Flush the prefetch buffer. */
2687#ifndef IEM_WITH_CODE_TLB
2688 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2689#endif
2690
2691 return VINF_SUCCESS;
2692}
2693
2694
2695/**
2696 * Implements enter.
2697 *
2698 * We're doing this in C because the instruction is insane; even in the
2699 * u8NestingLevel=0 case, dealing with the stack is tedious.
2700 *
2701 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the local stack frame to allocate, in bytes.
 * @param cParameters The nesting level, i.e. how many outer frame
 * pointers to copy (masked to 0..31 below).
2702 */
2703IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2704{
2705 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
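    /* Rough sketch of what the code below implements (the nesting level is
       taken modulo 32; see the Intel/AMD ENTER pseudo code for the exact
       number of copies):
            push RBP                  ; save the caller's frame pointer
            frame = RSP               ; address of the just-saved RBP
            if level > 0:
                copy the outer frame pointers, then push 'frame' as well
            RBP  = frame
            RSP -= cbFrame            ; reserve room for the locals */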
2706
2707 /* Push RBP, saving the old value in TmpRbp. */
2708 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2709 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2710 RTUINT64U NewRbp;
2711 VBOXSTRICTRC rcStrict;
2712 if (enmEffOpSize == IEMMODE_64BIT)
2713 {
2714 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2715 NewRbp = NewRsp;
2716 }
2717 else if (enmEffOpSize == IEMMODE_32BIT)
2718 {
2719 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2720 NewRbp = NewRsp;
2721 }
2722 else
2723 {
2724 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2725 NewRbp = TmpRbp;
2726 NewRbp.Words.w0 = NewRsp.Words.w0;
2727 }
2728 if (rcStrict != VINF_SUCCESS)
2729 return rcStrict;
2730
2731 /* Copy the parameters (aka nesting levels by Intel). */
2732 cParameters &= 0x1f;
2733 if (cParameters > 0)
2734 {
2735 switch (enmEffOpSize)
2736 {
2737 case IEMMODE_16BIT:
2738 if (pCtx->ss.Attr.n.u1DefBig)
2739 TmpRbp.DWords.dw0 -= 2;
2740 else
2741 TmpRbp.Words.w0 -= 2;
2742 do
2743 {
2744 uint16_t u16Tmp;
2745 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2746 if (rcStrict != VINF_SUCCESS)
2747 break;
2748 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2749 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2750 break;
2751
2752 case IEMMODE_32BIT:
2753 if (pCtx->ss.Attr.n.u1DefBig)
2754 TmpRbp.DWords.dw0 -= 4;
2755 else
2756 TmpRbp.Words.w0 -= 4;
2757 do
2758 {
2759 uint32_t u32Tmp;
2760 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2761 if (rcStrict != VINF_SUCCESS)
2762 break;
2763 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2764 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2765 break;
2766
2767 case IEMMODE_64BIT:
2768 TmpRbp.u -= 8;
2769 do
2770 {
2771 uint64_t u64Tmp;
2772 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2773 if (rcStrict != VINF_SUCCESS)
2774 break;
2775 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2776 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2777 break;
2778
2779 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2780 }
2781 if (rcStrict != VINF_SUCCESS)
2782 return rcStrict; /* Propagate the failure instead of swallowing it. */
2783
2784 /* Push the new RBP */
2785 if (enmEffOpSize == IEMMODE_64BIT)
2786 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2787 else if (enmEffOpSize == IEMMODE_32BIT)
2788 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2789 else
2790 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2791 if (rcStrict != VINF_SUCCESS)
2792 return rcStrict;
2793
2794 }
2795
2796 /* Recalc RSP. */
2797 iemRegSubFromRspEx(pVCpu, pCtx, &NewRsp, cbFrame);
2798
2799 /** @todo Should probe write access at the new RSP according to AMD. */
2800
2801 /* Commit it. */
2802 pCtx->rbp = NewRbp.u;
2803 pCtx->rsp = NewRsp.u;
2804 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2805
2806 return VINF_SUCCESS;
2807}
2808
2809
2810
2811/**
2812 * Implements leave.
2813 *
2814 * We're doing this in C because messing with the stack registers is annoying
2815 * since they depend on the SS attributes.
2816 *
2817 * @param enmEffOpSize The effective operand size.
2818 */
2819IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2820{
2821 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
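    /* LEAVE is essentially 'mov (r/e)sp, (r/e)bp' followed by 'pop (r/e)bp';
       the only wrinkle handled below is which stack pointer width to use,
       which depends on the CPU mode and the SS.D bit, while the width of the
       popped RBP follows the operand size. */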
2822
2823 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2824 RTUINT64U NewRsp;
2825 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2826 NewRsp.u = pCtx->rbp;
2827 else if (pCtx->ss.Attr.n.u1DefBig)
2828 NewRsp.u = pCtx->ebp;
2829 else
2830 {
2831 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2832 NewRsp.u = pCtx->rsp;
2833 NewRsp.Words.w0 = pCtx->bp;
2834 }
2835
2836 /* Pop RBP according to the operand size. */
2837 VBOXSTRICTRC rcStrict;
2838 RTUINT64U NewRbp;
2839 switch (enmEffOpSize)
2840 {
2841 case IEMMODE_16BIT:
2842 NewRbp.u = pCtx->rbp;
2843 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2844 break;
2845 case IEMMODE_32BIT:
2846 NewRbp.u = 0;
2847 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2848 break;
2849 case IEMMODE_64BIT:
2850 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2851 break;
2852 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2853 }
2854 if (rcStrict != VINF_SUCCESS)
2855 return rcStrict;
2856
2857
2858 /* Commit it. */
2859 pCtx->rbp = NewRbp.u;
2860 pCtx->rsp = NewRsp.u;
2861 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2862
2863 return VINF_SUCCESS;
2864}
2865
2866
2867/**
2868 * Implements int3 and int XX.
2869 *
2870 * @param u8Int The interrupt vector number.
2871 * @param enmInt The int instruction type.
2872 */
2873IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2874{
2875 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2876 return iemRaiseXcptOrInt(pVCpu,
2877 cbInstr,
2878 u8Int,
2879 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2880 0,
2881 0);
2882}
2883
2884
2885/**
2886 * Implements iret for real mode and V8086 mode.
2887 *
2888 * @param enmEffOpSize The effective operand size.
2889 */
2890IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2891{
2892 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2893 X86EFLAGS Efl;
2894 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
2895 NOREF(cbInstr);
2896
2897 /*
2898 * In V8086 mode with IOPL != 3, iret raises #GP(0) unless CR4.VME is set.
2899 */
2900 if ( Efl.Bits.u1VM
2901 && Efl.Bits.u2IOPL != 3
2902 && !(pCtx->cr4 & X86_CR4_VME))
2903 return iemRaiseGeneralProtectionFault0(pVCpu);
2904
2905 /*
2906 * Do the stack bits, but don't commit RSP before everything checks
2907 * out right.
2908 */
2909 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2910 VBOXSTRICTRC rcStrict;
2911 RTCPTRUNION uFrame;
2912 uint16_t uNewCs;
2913 uint32_t uNewEip;
2914 uint32_t uNewFlags;
2915 uint64_t uNewRsp;
2916 if (enmEffOpSize == IEMMODE_32BIT)
2917 {
2918 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
2919 if (rcStrict != VINF_SUCCESS)
2920 return rcStrict;
2921 uNewEip = uFrame.pu32[0];
2922 if (uNewEip > UINT16_MAX)
2923 return iemRaiseGeneralProtectionFault0(pVCpu);
2924
2925 uNewCs = (uint16_t)uFrame.pu32[1];
2926 uNewFlags = uFrame.pu32[2];
2927 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2928 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2929 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2930 | X86_EFL_ID;
2931 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2932 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2933 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2934 }
2935 else
2936 {
2937 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 uNewEip = uFrame.pu16[0];
2941 uNewCs = uFrame.pu16[1];
2942 uNewFlags = uFrame.pu16[2];
2943 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2944 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2945 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2946 /** @todo The intel pseudo code does not indicate what happens to
2947 * reserved flags. We just ignore them. */
2948 /* Ancient CPU adjustments: See iemCImpl_popf. */
2949 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2950 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2951 }
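 /* The frame popped above, low to high address:
        16-bit operand: [IP][CS][FLAGS], three words (6 bytes)
        32-bit operand: [EIP][CS + 2 padding bytes][EFLAGS], three dwords (12 bytes) */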
2952 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2953 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2954 { /* extremely likely */ }
2955 else
2956 return rcStrict;
2957
2958 /** @todo Check how this is supposed to work if sp=0xfffe. */
2959 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2960 uNewCs, uNewEip, uNewFlags, uNewRsp));
2961
2962 /*
2963 * Check the limit of the new EIP.
2964 */
2965 /** @todo Only the AMD pseudo code checks the limit here; what's
2966 * right? */
2967 if (uNewEip > pCtx->cs.u32Limit)
2968 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2969
2970 /*
2971 * V8086 checks and flag adjustments
2972 */
2973 if (Efl.Bits.u1VM)
2974 {
2975 if (Efl.Bits.u2IOPL == 3)
2976 {
2977 /* Preserve IOPL and clear RF. */
2978 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2979 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2980 }
2981 else if ( enmEffOpSize == IEMMODE_16BIT
2982 && ( !(uNewFlags & X86_EFL_IF)
2983 || !Efl.Bits.u1VIP )
2984 && !(uNewFlags & X86_EFL_TF) )
2985 {
2986 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2987 uNewFlags &= ~X86_EFL_VIF;
2988 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2989 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2990 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2991 }
2992 else
2993 return iemRaiseGeneralProtectionFault0(pVCpu);
2994 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2995 }
2996
2997 /*
2998 * Commit the operation.
2999 */
3000#ifdef DBGFTRACE_ENABLED
3001 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3002 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3003#endif
3004 pCtx->rsp = uNewRsp;
3005 pCtx->rip = uNewEip;
3006 pCtx->cs.Sel = uNewCs;
3007 pCtx->cs.ValidSel = uNewCs;
3008 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3009 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
3010 /** @todo do we load attribs and limit as well? */
3011 Assert(uNewFlags & X86_EFL_1);
3012 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3013
3014 /* Flush the prefetch buffer. */
3015#ifdef IEM_WITH_CODE_TLB
3016 pVCpu->iem.s.pbInstrBuf = NULL;
3017#else
3018 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3019#endif
3020
3021 return VINF_SUCCESS;
3022}
3023
3024
3025/**
3026 * Loads a segment register when entering V8086 mode.
3027 *
3028 * @param pSReg The segment register.
3029 * @param uSeg The segment to load.
3030 */
3031static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3032{
3033 pSReg->Sel = uSeg;
3034 pSReg->ValidSel = uSeg;
3035 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3036 pSReg->u64Base = (uint32_t)uSeg << 4;
3037 pSReg->u32Limit = 0xffff;
3038 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3039 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3040 * IRET'ing to V8086. */
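 /* Illustrative example: loading selector 0x1234 here yields a real-mode
    style segment with base 0x12340, limit 0xFFFF and attributes 0xf3
    (present, DPL=3, accessed read/write data), i.e. the value the VT-x
    remark above refers to. */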
3041}
3042
3043
3044/**
3045 * Implements iret for protected mode returning to V8086 mode.
3046 *
3047 * @param pCtx Pointer to the CPU context.
3048 * @param uNewEip The new EIP.
3049 * @param uNewCs The new CS.
3050 * @param uNewFlags The new EFLAGS.
3051 * @param uNewRsp The RSP after the initial IRET frame.
3052 *
3053 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3054 */
3055IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
3056 uint32_t, uNewFlags, uint64_t, uNewRsp)
3057{
3058 RT_NOREF_PV(cbInstr);
3059
3060 /*
3061 * Pop the V8086 specific frame bits off the stack.
3062 */
3063 VBOXSTRICTRC rcStrict;
3064 RTCPTRUNION uFrame;
3065 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 24, &uFrame.pv, &uNewRsp);
3066 if (rcStrict != VINF_SUCCESS)
3067 return rcStrict;
3068 uint32_t uNewEsp = uFrame.pu32[0];
3069 uint16_t uNewSs = uFrame.pu32[1];
3070 uint16_t uNewEs = uFrame.pu32[2];
3071 uint16_t uNewDs = uFrame.pu32[3];
3072 uint16_t uNewFs = uFrame.pu32[4];
3073 uint16_t uNewGs = uFrame.pu32[5];
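 /* Together with the EIP/CS/EFLAGS already popped by the caller this makes
    up the full nine-dword IRET-to-V8086 frame: EIP, CS, EFLAGS, ESP, SS,
    ES, DS, FS, GS (one dword slot each, selectors in the low word). */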
3074 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3075 if (rcStrict != VINF_SUCCESS)
3076 return rcStrict;
3077
3078 /*
3079 * Commit the operation.
3080 */
3081 uNewFlags &= X86_EFL_LIVE_MASK;
3082 uNewFlags |= X86_EFL_RA1_MASK;
3083#ifdef DBGFTRACE_ENABLED
3084 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3085 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3086#endif
3087 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3088
3089 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3090 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
3091 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
3092 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
3093 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
3094 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
3095 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
3096 pCtx->rip = (uint16_t)uNewEip;
3097 pCtx->rsp = uNewEsp; /** @todo check this out! */
3098 pVCpu->iem.s.uCpl = 3;
3099
3100 /* Flush the prefetch buffer. */
3101#ifdef IEM_WITH_CODE_TLB
3102 pVCpu->iem.s.pbInstrBuf = NULL;
3103#else
3104 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3105#endif
3106
3107 return VINF_SUCCESS;
3108}
3109
3110
3111/**
3112 * Implements iret for protected mode returning via a nested task.
3113 *
3114 * @param enmEffOpSize The effective operand size.
3115 */
3116IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3117{
3118 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3119#ifndef IEM_IMPLEMENTS_TASKSWITCH
3120 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3121#else
3122 RT_NOREF_PV(enmEffOpSize);
3123
3124 /*
3125 * Read the segment selector in the link-field of the current TSS.
3126 */
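 /* The previous-task link is the very first word of both the 286 and 386
    TSS layouts, hence the read at TR.base with no additional offset. */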
3127 RTSEL uSelRet;
3128 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3129 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
3130 if (rcStrict != VINF_SUCCESS)
3131 return rcStrict;
3132
3133 /*
3134 * Fetch the returning task's TSS descriptor from the GDT.
3135 */
3136 if (uSelRet & X86_SEL_LDT)
3137 {
3138 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3139 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3140 }
3141
3142 IEMSELDESC TssDesc;
3143 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3144 if (rcStrict != VINF_SUCCESS)
3145 return rcStrict;
3146
3147 if (TssDesc.Legacy.Gate.u1DescType)
3148 {
3149 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3150 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3151 }
3152
3153 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3154 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3155 {
3156 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3157 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3158 }
3159
3160 if (!TssDesc.Legacy.Gate.u1Present)
3161 {
3162 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3163 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3164 }
3165
3166 uint32_t uNextEip = pCtx->eip + cbInstr;
3167 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3168 0 /* uCr2 */, uSelRet, &TssDesc);
3169#endif
3170}
3171
3172
3173/**
3174 * Implements iret for protected mode
3175 *
3176 * @param enmEffOpSize The effective operand size.
3177 */
3178IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3179{
3180 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3181 NOREF(cbInstr);
3182 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3183
3184 /*
3185 * Nested task return.
3186 */
3187 if (pCtx->eflags.Bits.u1NT)
3188 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3189
3190 /*
3191 * Normal return.
3192 *
3193 * Do the stack bits, but don't commit RSP before everything checks
3194 * out right.
3195 */
3196 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3197 VBOXSTRICTRC rcStrict;
3198 RTCPTRUNION uFrame;
3199 uint16_t uNewCs;
3200 uint32_t uNewEip;
3201 uint32_t uNewFlags;
3202 uint64_t uNewRsp;
3203 if (enmEffOpSize == IEMMODE_32BIT)
3204 {
3205 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
3206 if (rcStrict != VINF_SUCCESS)
3207 return rcStrict;
3208 uNewEip = uFrame.pu32[0];
3209 uNewCs = (uint16_t)uFrame.pu32[1];
3210 uNewFlags = uFrame.pu32[2];
3211 }
3212 else
3213 {
3214 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
3215 if (rcStrict != VINF_SUCCESS)
3216 return rcStrict;
3217 uNewEip = uFrame.pu16[0];
3218 uNewCs = uFrame.pu16[1];
3219 uNewFlags = uFrame.pu16[2];
3220 }
3221 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3222 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3223 { /* extremely likely */ }
3224 else
3225 return rcStrict;
3226 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3227
3228 /*
3229 * We're hopefully not returning to V8086 mode...
3230 */
3231 if ( (uNewFlags & X86_EFL_VM)
3232 && pVCpu->iem.s.uCpl == 0)
3233 {
3234 Assert(enmEffOpSize == IEMMODE_32BIT);
3235 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3236 }
3237
3238 /*
3239 * Protected mode.
3240 */
3241 /* Read the CS descriptor. */
3242 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3243 {
3244 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3245 return iemRaiseGeneralProtectionFault0(pVCpu);
3246 }
3247
3248 IEMSELDESC DescCS;
3249 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3250 if (rcStrict != VINF_SUCCESS)
3251 {
3252 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3253 return rcStrict;
3254 }
3255
3256 /* Must be a code descriptor. */
3257 if (!DescCS.Legacy.Gen.u1DescType)
3258 {
3259 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3260 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3261 }
3262 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3263 {
3264 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3265 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3266 }
3267
3268#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3269 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3270 PVM pVM = pVCpu->CTX_SUFF(pVM);
3271 if (EMIsRawRing0Enabled(pVM) && VM_IS_RAW_MODE_ENABLED(pVM))
3272 {
3273 if ((uNewCs & X86_SEL_RPL) == 1)
3274 {
3275 if ( pVCpu->iem.s.uCpl == 0
3276 && ( !EMIsRawRing1Enabled(pVM)
3277 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3278 {
3279 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3280 uNewCs &= X86_SEL_MASK_OFF_RPL;
3281 }
3282# ifdef LOG_ENABLED
3283 else if (pVCpu->iem.s.uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3284 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3285# endif
3286 }
3287 else if ( (uNewCs & X86_SEL_RPL) == 2
3288 && EMIsRawRing1Enabled(pVM)
3289 && pVCpu->iem.s.uCpl <= 1)
3290 {
3291 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3292 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3293 }
3294 }
3295#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3296
3297
3298 /* Privilege checks. */
3299 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3300 {
3301 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3302 {
3303 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3304 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3305 }
3306 }
3307 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3308 {
3309 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3310 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3311 }
3312 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3313 {
3314 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3315 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3316 }
3317
3318 /* Present? */
3319 if (!DescCS.Legacy.Gen.u1Present)
3320 {
3321 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3322 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3323 }
3324
3325 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3326
3327 /*
3328 * Return to outer level?
3329 */
3330 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3331 {
3332 uint16_t uNewSS;
3333 uint32_t uNewESP;
3334 if (enmEffOpSize == IEMMODE_32BIT)
3335 {
3336 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &uFrame.pv, &uNewRsp);
3337 if (rcStrict != VINF_SUCCESS)
3338 return rcStrict;
3339/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3340 * 16 or 32 bits get loaded into SP depends on the D/B
3341 * bit of the popped SS selector, it turns out. */
3342 uNewESP = uFrame.pu32[0];
3343 uNewSS = (uint16_t)uFrame.pu32[1];
3344 }
3345 else
3346 {
3347 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 4, &uFrame.pv, &uNewRsp);
3348 if (rcStrict != VINF_SUCCESS)
3349 return rcStrict;
3350 uNewESP = uFrame.pu16[0];
3351 uNewSS = uFrame.pu16[1];
3352 }
3353 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3354 if (rcStrict != VINF_SUCCESS)
3355 return rcStrict;
3356 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3357
3358 /* Read the SS descriptor. */
3359 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3360 {
3361 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3362 return iemRaiseGeneralProtectionFault0(pVCpu);
3363 }
3364
3365 IEMSELDESC DescSS;
3366 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3367 if (rcStrict != VINF_SUCCESS)
3368 {
3369 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3370 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3371 return rcStrict;
3372 }
3373
3374 /* Privilege checks. */
3375 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3376 {
3377 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3378 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3379 }
3380 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3381 {
3382 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3383 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3384 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3385 }
3386
3387 /* Must be a writeable data segment descriptor. */
3388 if (!DescSS.Legacy.Gen.u1DescType)
3389 {
3390 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3391 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3392 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3393 }
3394 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3395 {
3396 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3397 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3398 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3399 }
3400
3401 /* Present? */
3402 if (!DescSS.Legacy.Gen.u1Present)
3403 {
3404 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3405 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3406 }
3407
3408 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3409
3410 /* Check EIP. */
3411 if (uNewEip > cbLimitCS)
3412 {
3413 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3414 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3415 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3416 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3417 }
3418
3419 /*
3420 * Commit the changes, marking CS and SS accessed first since
3421 * that may fail.
3422 */
3423 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3424 {
3425 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3426 if (rcStrict != VINF_SUCCESS)
3427 return rcStrict;
3428 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3429 }
3430 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3431 {
3432 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3433 if (rcStrict != VINF_SUCCESS)
3434 return rcStrict;
3435 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3436 }
3437
3438 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3439 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3440 if (enmEffOpSize != IEMMODE_16BIT)
3441 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3442 if (pVCpu->iem.s.uCpl == 0)
3443 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3444 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3445 fEFlagsMask |= X86_EFL_IF;
3446 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3447 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3448 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3449 fEFlagsNew &= ~fEFlagsMask;
3450 fEFlagsNew |= uNewFlags & fEFlagsMask;
3451#ifdef DBGFTRACE_ENABLED
3452 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3453 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3454 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3455#endif
3456
3457 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3458 pCtx->rip = uNewEip;
3459 pCtx->cs.Sel = uNewCs;
3460 pCtx->cs.ValidSel = uNewCs;
3461 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3462 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3463 pCtx->cs.u32Limit = cbLimitCS;
3464 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3465 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3466
3467 pCtx->ss.Sel = uNewSS;
3468 pCtx->ss.ValidSel = uNewSS;
3469 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3470 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3471 pCtx->ss.u32Limit = cbLimitSs;
3472 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3473 if (!pCtx->ss.Attr.n.u1DefBig)
3474 pCtx->sp = (uint16_t)uNewESP;
3475 else
3476 pCtx->rsp = uNewESP;
3477
3478 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3479 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3480 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3481 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3482 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3483
3484 /* Done! */
3485
3486 }
3487 /*
3488 * Return to the same level.
3489 */
3490 else
3491 {
3492 /* Check EIP. */
3493 if (uNewEip > cbLimitCS)
3494 {
3495 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3496 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3497 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3498 }
3499
3500 /*
3501 * Commit the changes, marking CS first since it may fail.
3502 */
3503 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3504 {
3505 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3506 if (rcStrict != VINF_SUCCESS)
3507 return rcStrict;
3508 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3509 }
3510
3511 X86EFLAGS NewEfl;
3512 NewEfl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
3513 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3514 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3515 if (enmEffOpSize != IEMMODE_16BIT)
3516 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3517 if (pVCpu->iem.s.uCpl == 0)
3518 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3519 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3520 fEFlagsMask |= X86_EFL_IF;
3521 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3522 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3523 NewEfl.u &= ~fEFlagsMask;
3524 NewEfl.u |= fEFlagsMask & uNewFlags;
3525#ifdef DBGFTRACE_ENABLED
3526 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3527 pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip,
3528 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3529#endif
3530
3531 IEMMISC_SET_EFL(pVCpu, pCtx, NewEfl.u);
3532 pCtx->rip = uNewEip;
3533 pCtx->cs.Sel = uNewCs;
3534 pCtx->cs.ValidSel = uNewCs;
3535 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3536 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3537 pCtx->cs.u32Limit = cbLimitCS;
3538 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3539 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3540 if (!pCtx->ss.Attr.n.u1DefBig)
3541 pCtx->sp = (uint16_t)uNewRsp;
3542 else
3543 pCtx->rsp = uNewRsp;
3544 /* Done! */
3545 }
3546
3547 /* Flush the prefetch buffer. */
3548#ifdef IEM_WITH_CODE_TLB
3549 pVCpu->iem.s.pbInstrBuf = NULL;
3550#else
3551 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3552#endif
3553
3554 return VINF_SUCCESS;
3555}
3556
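/*
 * Editor's note: the following stand-alone sketch restates the EFLAGS restore
 * rules used by both branches of iemCImpl_iret_prot above (the pre-486
 * target-CPU adjustment is left out for brevity).  It is illustrative only and
 * not part of the sources; the helper name and the local EFL_* defines are
 * made up here, assuming the standard x86 EFLAGS bit layout.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

#define EFL_CF    UINT32_C(0x00000001)
#define EFL_PF    UINT32_C(0x00000004)
#define EFL_AF    UINT32_C(0x00000010)
#define EFL_ZF    UINT32_C(0x00000040)
#define EFL_SF    UINT32_C(0x00000080)
#define EFL_TF    UINT32_C(0x00000100)
#define EFL_IF    UINT32_C(0x00000200)
#define EFL_DF    UINT32_C(0x00000400)
#define EFL_OF    UINT32_C(0x00000800)
#define EFL_IOPL  UINT32_C(0x00003000)
#define EFL_NT    UINT32_C(0x00004000)
#define EFL_RF    UINT32_C(0x00010000)
#define EFL_AC    UINT32_C(0x00040000)
#define EFL_VIF   UINT32_C(0x00080000)
#define EFL_VIP   UINT32_C(0x00100000)
#define EFL_ID    UINT32_C(0x00200000)

/* Which EFLAGS bits a protected mode IRET may take from the popped image. */
static uint32_t iretProtEflMask(unsigned uCpl, unsigned uIopl, int f32BitOpSize)
{
    uint32_t fMask = EFL_CF | EFL_PF | EFL_AF | EFL_ZF | EFL_SF
                   | EFL_TF | EFL_DF | EFL_OF | EFL_NT;
    if (f32BitOpSize)
        fMask |= EFL_RF | EFL_AC | EFL_ID;              /* only visible with a 32-bit pop */
    if (uCpl == 0)
        fMask |= EFL_IF | EFL_IOPL | EFL_VIF | EFL_VIP; /* ring-0 may change IOPL         */
    else if (uCpl <= uIopl)
        fMask |= EFL_IF;                                /* IF only when CPL <= IOPL       */
    return fMask;
}
#endif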
3557
3558/**
3559 * Implements iret for long mode
3560 *
3561 * @param enmEffOpSize The effective operand size.
3562 */
3563IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3564{
3565 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3566 NOREF(cbInstr);
3567
3568 /*
3569 * Nested task return is not supported in long mode.
3570 */
3571 if (pCtx->eflags.Bits.u1NT)
3572 {
3573 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3574 return iemRaiseGeneralProtectionFault0(pVCpu);
3575 }
3576
3577 /*
3578 * Normal return.
3579 *
3580 * Do the stack bits, but don't commit RSP before everything checks
3581 * out right.
3582 */
3583 VBOXSTRICTRC rcStrict;
3584 RTCPTRUNION uFrame;
3585 uint64_t uNewRip;
3586 uint16_t uNewCs;
3587 uint16_t uNewSs;
3588 uint32_t uNewFlags;
3589 uint64_t uNewRsp;
3590 if (enmEffOpSize == IEMMODE_64BIT)
3591 {
3592 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);
3593 if (rcStrict != VINF_SUCCESS)
3594 return rcStrict;
3595 uNewRip = uFrame.pu64[0];
3596 uNewCs = (uint16_t)uFrame.pu64[1];
3597 uNewFlags = (uint32_t)uFrame.pu64[2];
3598 uNewRsp = uFrame.pu64[3];
3599 uNewSs = (uint16_t)uFrame.pu64[4];
3600 }
3601 else if (enmEffOpSize == IEMMODE_32BIT)
3602 {
3603 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);
3604 if (rcStrict != VINF_SUCCESS)
3605 return rcStrict;
3606 uNewRip = uFrame.pu32[0];
3607 uNewCs = (uint16_t)uFrame.pu32[1];
3608 uNewFlags = uFrame.pu32[2];
3609 uNewRsp = uFrame.pu32[3];
3610 uNewSs = (uint16_t)uFrame.pu32[4];
3611 }
3612 else
3613 {
3614 Assert(enmEffOpSize == IEMMODE_16BIT);
3615 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);
3616 if (rcStrict != VINF_SUCCESS)
3617 return rcStrict;
3618 uNewRip = uFrame.pu16[0];
3619 uNewCs = uFrame.pu16[1];
3620 uNewFlags = uFrame.pu16[2];
3621 uNewRsp = uFrame.pu16[3];
3622 uNewSs = uFrame.pu16[4];
3623 }
3624 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3625 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3626 { /* extremely likely */ }
3627 else
3628 return rcStrict;
3629 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3630
3631 /*
3632 * Check stuff.
3633 */
3634 /* Read the CS descriptor. */
3635 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3636 {
3637 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3638 return iemRaiseGeneralProtectionFault0(pVCpu);
3639 }
3640
3641 IEMSELDESC DescCS;
3642 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3643 if (rcStrict != VINF_SUCCESS)
3644 {
3645 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3646 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3647 return rcStrict;
3648 }
3649
3650 /* Must be a code descriptor. */
3651 if ( !DescCS.Legacy.Gen.u1DescType
3652 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3653 {
3654 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
3655 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3656 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3657 }
3658
3659 /* Privilege checks. */
3660 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3661 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3662 {
3663 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3664 {
3665 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3666 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3667 }
3668 }
3669 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3670 {
3671 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3672 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3673 }
3674 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3675 {
3676 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3677 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3678 }
3679
3680 /* Present? */
3681 if (!DescCS.Legacy.Gen.u1Present)
3682 {
3683 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3684 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3685 }
3686
3687 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3688
3689 /* Read the SS descriptor. */
3690 IEMSELDESC DescSS;
3691 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3692 {
3693 if ( !DescCS.Legacy.Gen.u1Long
3694 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3695 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3696 {
3697 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3698 return iemRaiseGeneralProtectionFault0(pVCpu);
3699 }
3700 DescSS.Legacy.u = 0;
3701 }
3702 else
3703 {
3704 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3705 if (rcStrict != VINF_SUCCESS)
3706 {
3707 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3708 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3709 return rcStrict;
3710 }
3711 }
3712
3713 /* Privilege checks. */
3714 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3715 {
3716 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3717 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3718 }
3719
3720 uint32_t cbLimitSs;
3721 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3722 cbLimitSs = UINT32_MAX;
3723 else
3724 {
3725 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3726 {
3727 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3728 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3729 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3730 }
3731
3732 /* Must be a writeable data segment descriptor. */
3733 if (!DescSS.Legacy.Gen.u1DescType)
3734 {
3735 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3736 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3737 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3738 }
3739 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3740 {
3741 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3742 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3743 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3744 }
3745
3746 /* Present? */
3747 if (!DescSS.Legacy.Gen.u1Present)
3748 {
3749 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3750 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3751 }
3752 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3753 }
3754
3755 /* Check EIP. */
3756 if (DescCS.Legacy.Gen.u1Long)
3757 {
3758 if (!IEM_IS_CANONICAL(uNewRip))
3759 {
3760 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3761 uNewCs, uNewRip, uNewSs, uNewRsp));
3762 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3763 }
3764 }
3765 else
3766 {
3767 if (uNewRip > cbLimitCS)
3768 {
3769 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3770 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3771 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3772 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3773 }
3774 }
3775
3776 /*
3777 * Commit the changes, marking CS and SS accessed first since
3778 * that may fail.
3779 */
3780 /** @todo where exactly are these actually marked accessed by a real CPU? */
3781 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3782 {
3783 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3784 if (rcStrict != VINF_SUCCESS)
3785 return rcStrict;
3786 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3787 }
3788 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3789 {
3790 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3791 if (rcStrict != VINF_SUCCESS)
3792 return rcStrict;
3793 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3794 }
3795
3796 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3797 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3798 if (enmEffOpSize != IEMMODE_16BIT)
3799 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3800 if (pVCpu->iem.s.uCpl == 0)
3801 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3802 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3803 fEFlagsMask |= X86_EFL_IF;
3804 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3805 fEFlagsNew &= ~fEFlagsMask;
3806 fEFlagsNew |= uNewFlags & fEFlagsMask;
3807#ifdef DBGFTRACE_ENABLED
3808 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3809 pVCpu->iem.s.uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3810#endif
3811
3812 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3813 pCtx->rip = uNewRip;
3814 pCtx->cs.Sel = uNewCs;
3815 pCtx->cs.ValidSel = uNewCs;
3816 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3817 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3818 pCtx->cs.u32Limit = cbLimitCS;
3819 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3820 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3821 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3822 pCtx->rsp = uNewRsp;
3823 else
3824 pCtx->sp = (uint16_t)uNewRsp;
3825 pCtx->ss.Sel = uNewSs;
3826 pCtx->ss.ValidSel = uNewSs;
3827 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3828 {
3829 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3830 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3831 pCtx->ss.u32Limit = UINT32_MAX;
3832 pCtx->ss.u64Base = 0;
3833 Log2(("iretq new SS: NULL\n"));
3834 }
3835 else
3836 {
3837 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3838 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3839 pCtx->ss.u32Limit = cbLimitSs;
3840 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3841 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3842 }
3843
3844 if (pVCpu->iem.s.uCpl != uNewCpl)
3845 {
3846 pVCpu->iem.s.uCpl = uNewCpl;
3847 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->ds);
3848 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->es);
3849 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->fs);
3850 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->gs);
3851 }
3852
3853 /* Flush the prefetch buffer. */
3854#ifdef IEM_WITH_CODE_TLB
3855 pVCpu->iem.s.pbInstrBuf = NULL;
3856#else
3857 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3858#endif
3859
3860 return VINF_SUCCESS;
3861}
3862
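/*
 * Editor's note: an illustrative, stand-alone restatement of the five-slot
 * stack frame iemCImpl_iret_64bit pops above.  Not part of the sources; the
 * struct and helper are hypothetical and only show the layout and how the
 * slot width follows the effective operand size.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>
#include <stddef.h>

typedef struct IRET64FRAME
{
    uint64_t uRip;      /* slot 0: new RIP                       */
    uint64_t uCs;       /* slot 1: new CS, low 16 bits are used  */
    uint64_t uRFlags;   /* slot 2: new RFLAGS                    */
    uint64_t uRsp;      /* slot 3: new RSP                       */
    uint64_t uSs;       /* slot 4: new SS, low 16 bits are used  */
} IRET64FRAME;

/* The amount popped: 5*8, 5*4 or 5*2 bytes for 64/32/16-bit operand size. */
static size_t iretFrameSize(unsigned cbOperand)
{
    return 5 * (size_t)cbOperand;
}
#endif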
3863
3864/**
3865 * Implements iret.
3866 *
3867 * @param enmEffOpSize The effective operand size.
3868 */
3869IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3870{
3871 /*
3872 * First, clear NMI blocking, if any, before causing any exceptions.
3873 */
3874 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3875
3876 /*
3877 * The SVM nested-guest intercept for iret takes priority over all exceptions,
3878 * see AMD spec. "15.9 Instruction Intercepts".
3879 */
3880 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3881 {
3882 Log(("iret: Guest intercept -> #VMEXIT\n"));
3883 IEM_SVM_UPDATE_NRIP(pVCpu);
3884 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3885 }
3886
3887 /*
3888 * Call a mode specific worker.
3889 */
3890 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3891 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3892 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3893 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3894 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3895}
3896
3897
3898/**
3899 * Implements SYSCALL (AMD and Intel64).
3900 *
3901 * This instruction takes no explicit operands.
3902 */
3903IEM_CIMPL_DEF_0(iemCImpl_syscall)
3904{
3905 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3906
3907 /*
3908 * Check preconditions.
3909 *
3910 * Note that CPUs described in the documentation may load a few odd values
3911 * into CS and SS than we allow here. This has yet to be checked on real
3912 * hardware.
3913 */
3914 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3915 {
3916 Log(("syscall: Not enabled in EFER -> #UD\n"));
3917 return iemRaiseUndefinedOpcode(pVCpu);
3918 }
3919 if (!(pCtx->cr0 & X86_CR0_PE))
3920 {
3921 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3922 return iemRaiseGeneralProtectionFault0(pVCpu);
3923 }
3924 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3925 {
3926 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3927 return iemRaiseUndefinedOpcode(pVCpu);
3928 }
3929
3930 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3931 /** @todo what about LDT selectors? Shouldn't matter, really. */
3932 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3933 uint16_t uNewSs = uNewCs + 8;
3934 if (uNewCs == 0 || uNewSs == 0)
3935 {
3936 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3937 return iemRaiseGeneralProtectionFault0(pVCpu);
3938 }
3939
3940 /* Long mode and legacy mode differs. */
3941 if (CPUMIsGuestInLongModeEx(pCtx))
3942 {
3943 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3944
3945 /* This test isn't in the docs, but I'm not trusting the guys writing
3946 the MSRs to have validated the values as canonical like they should. */
3947 if (!IEM_IS_CANONICAL(uNewRip))
3948 {
3949 Log(("syscall: New RIP not canonical -> #UD\n"));
3950 return iemRaiseUndefinedOpcode(pVCpu);
3951 }
3952
3953 /*
3954 * Commit it.
3955 */
3956 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3957 pCtx->rcx = pCtx->rip + cbInstr;
3958 pCtx->rip = uNewRip;
3959
3960 pCtx->rflags.u &= ~X86_EFL_RF;
3961 pCtx->r11 = pCtx->rflags.u;
3962 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3963 pCtx->rflags.u |= X86_EFL_1;
3964
3965 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3966 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3967 }
3968 else
3969 {
3970 /*
3971 * Commit it.
3972 */
3973 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3974 pCtx->cs.Sel, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3975 pCtx->rcx = pCtx->eip + cbInstr;
3976 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3977 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3978
3979 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3980 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3981 }
3982 pCtx->cs.Sel = uNewCs;
3983 pCtx->cs.ValidSel = uNewCs;
3984 pCtx->cs.u64Base = 0;
3985 pCtx->cs.u32Limit = UINT32_MAX;
3986 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3987
3988 pCtx->ss.Sel = uNewSs;
3989 pCtx->ss.ValidSel = uNewSs;
3990 pCtx->ss.u64Base = 0;
3991 pCtx->ss.u32Limit = UINT32_MAX;
3992 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3993
3994 /* Flush the prefetch buffer. */
3995#ifdef IEM_WITH_CODE_TLB
3996 pVCpu->iem.s.pbInstrBuf = NULL;
3997#else
3998 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3999#endif
4000
4001 return VINF_SUCCESS;
4002}
4003
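/*
 * Editor's note: a minimal sketch of how SYSCALL derives its target selectors
 * from MSR_STAR, matching the uNewCs/uNewSs computation above.  Not part of
 * the sources; it assumes the architectural STAR layout where bits 47:32 hold
 * the SYSCALL CS/SS base selector (MSR_K6_STAR_SYSCALL_CS_SS_SHIFT).
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

static void syscallSelectorsFromStar(uint64_t uStar, uint16_t *puCs, uint16_t *puSs)
{
    uint16_t uCs = (uint16_t)((uStar >> 32) & 0xfffc);  /* strip the RPL bits      */
    *puCs = uCs;
    *puSs = (uint16_t)(uCs + 8);                        /* SS is the next GDT slot */
}
#endif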
4004
4005/**
4006 * Implements SYSRET (AMD and Intel64).
4007 */
4008IEM_CIMPL_DEF_0(iemCImpl_sysret)
4009
4010{
4011 RT_NOREF_PV(cbInstr);
4012 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4013
4014 /*
4015 * Check preconditions.
4016 *
4017 * Note that CPUs described in the documentation may load a few odd values
4018 * into CS and SS beyond what we allow here. This has yet to be checked on real
4019 * hardware.
4020 */
4021 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
4022 {
4023 Log(("sysret: Not enabled in EFER -> #UD\n"));
4024 return iemRaiseUndefinedOpcode(pVCpu);
4025 }
4026 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
4027 {
4028 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4029 return iemRaiseUndefinedOpcode(pVCpu);
4030 }
4031 if (!(pCtx->cr0 & X86_CR0_PE))
4032 {
4033 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4034 return iemRaiseGeneralProtectionFault0(pVCpu);
4035 }
4036 if (pVCpu->iem.s.uCpl != 0)
4037 {
4038 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4039 return iemRaiseGeneralProtectionFault0(pVCpu);
4040 }
4041
4042 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4043 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4044 uint16_t uNewSs = uNewCs + 8;
4045 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4046 uNewCs += 16;
4047 if (uNewCs == 0 || uNewSs == 0)
4048 {
4049 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4050 return iemRaiseGeneralProtectionFault0(pVCpu);
4051 }
4052
4053 /*
4054 * Commit it.
4055 */
4056 if (CPUMIsGuestInLongModeEx(pCtx))
4057 {
4058 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4059 {
4060 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
4061 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
4062 /* Note! We disregard the Intel manual regarding the RCX canonical
4063 check, ask intel+xen why AMD doesn't do it. */
4064 pCtx->rip = pCtx->rcx;
4065 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4066 | (3 << X86DESCATTR_DPL_SHIFT);
4067 }
4068 else
4069 {
4070 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
4071 pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
4072 pCtx->rip = pCtx->ecx;
4073 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4074 | (3 << X86DESCATTR_DPL_SHIFT);
4075 }
4076 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4077 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
4078 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4079 pCtx->rflags.u |= X86_EFL_1;
4080 }
4081 else
4082 {
4083 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
4084 pCtx->rip = pCtx->rcx;
4085 pCtx->rflags.u |= X86_EFL_IF;
4086 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4087 | (3 << X86DESCATTR_DPL_SHIFT);
4088 }
4089 pCtx->cs.Sel = uNewCs | 3;
4090 pCtx->cs.ValidSel = uNewCs | 3;
4091 pCtx->cs.u64Base = 0;
4092 pCtx->cs.u32Limit = UINT32_MAX;
4093 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4094
4095 pCtx->ss.Sel = uNewSs | 3;
4096 pCtx->ss.ValidSel = uNewSs | 3;
4097 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4098 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4099 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4100 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4101 * on sysret. */
4102
4103 /* Flush the prefetch buffer. */
4104#ifdef IEM_WITH_CODE_TLB
4105 pVCpu->iem.s.pbInstrBuf = NULL;
4106#else
4107 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4108#endif
4109
4110 return VINF_SUCCESS;
4111}
4112
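/*
 * Editor's note: the counterpart sketch for SYSRET, mirroring the selector
 * computation above.  Not part of the sources; it assumes the architectural
 * STAR layout where bits 63:48 hold the SYSRET CS/SS base selector, with the
 * 64-bit CS placed 16 bytes further up and both selectors returned with RPL 3.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

static void sysretSelectorsFromStar(uint64_t uStar, int f64BitReturn, uint16_t *puCs, uint16_t *puSs)
{
    uint16_t uBase = (uint16_t)((uStar >> 48) & 0xfffc);            /* strip the RPL bits  */
    uint16_t uCs   = (uint16_t)(uBase + (f64BitReturn ? 16 : 0));
    *puCs = (uint16_t)(uCs | 3);                                    /* returning to ring 3 */
    *puSs = (uint16_t)((uBase + 8) | 3);                            /* SS ignores the +16  */
}
#endif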
4113
4114/**
4115 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4116 *
4117 * @param iSegReg The segment register number (valid).
4118 * @param uSel The new selector value.
4119 */
4120IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4121{
4122 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4123 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4124 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4125
4126 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4127
4128 /*
4129 * Real mode and V8086 mode are easy.
4130 */
4131 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT
4132 && IEM_IS_REAL_OR_V86_MODE(pVCpu))
4133 {
4134 *pSel = uSel;
4135 pHid->u64Base = (uint32_t)uSel << 4;
4136 pHid->ValidSel = uSel;
4137 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4138#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4139 /** @todo Does the CPU actually load limits and attributes in the
4140 * real/V8086 mode segment load case? It doesn't for CS in far
4141 * jumps... Affects unreal mode. */
4142 pHid->u32Limit = 0xffff;
4143 pHid->Attr.u = 0;
4144 pHid->Attr.n.u1Present = 1;
4145 pHid->Attr.n.u1DescType = 1;
4146 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4147 ? X86_SEL_TYPE_RW
4148 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4149#endif
4150 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4151 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4152 return VINF_SUCCESS;
4153 }
4154
4155 /*
4156 * Protected mode.
4157 *
4158 * Check if it's a null segment selector value first, that's OK for DS, ES,
4159 * FS and GS. If not null, then we have to load and parse the descriptor.
4160 */
4161 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4162 {
4163 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4164 if (iSegReg == X86_SREG_SS)
4165 {
4166 /* In 64-bit kernel mode, the stack can be 0 because of the way
4167 interrupts are dispatched. AMD seems to have a slightly more
4168 relaxed relationship to SS.RPL than intel does. */
4169 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4170 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4171 || pVCpu->iem.s.uCpl > 2
4172 || ( uSel != pVCpu->iem.s.uCpl
4173 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4174 {
4175 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4176 return iemRaiseGeneralProtectionFault0(pVCpu);
4177 }
4178 }
4179
4180 *pSel = uSel; /* Not RPL, remember :-) */
4181 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4182 if (iSegReg == X86_SREG_SS)
4183 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4184
4185 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4186 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4187
4188 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4189 return VINF_SUCCESS;
4190 }
4191
4192 /* Fetch the descriptor. */
4193 IEMSELDESC Desc;
4194 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4195 if (rcStrict != VINF_SUCCESS)
4196 return rcStrict;
4197
4198 /* Check GPs first. */
4199 if (!Desc.Legacy.Gen.u1DescType)
4200 {
4201 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4202 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4203 }
4204 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4205 {
4206 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4207 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4208 {
4209 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4210 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4211 }
4212 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4213 {
4214 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4215 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4216 }
4217 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4218 {
4219 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4220 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4221 }
4222 }
4223 else
4224 {
4225 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4226 {
4227 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4228 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4229 }
4230 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4231 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4232 {
4233#if 0 /* this is what intel says. */
4234 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4235 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4236 {
4237 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4238 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4239 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4240 }
4241#else /* this is what makes more sense. */
4242 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4243 {
4244 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4245 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4246 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4247 }
4248 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4249 {
4250 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4251 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4252 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4253 }
4254#endif
4255 }
4256 }
4257
4258 /* Is it there? */
4259 if (!Desc.Legacy.Gen.u1Present)
4260 {
4261 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4262 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4263 }
4264
4265 /* The base and limit. */
4266 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4267 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4268
4269 /*
4270 * Ok, everything checked out fine. Now set the accessed bit before
4271 * committing the result into the registers.
4272 */
4273 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4274 {
4275 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4276 if (rcStrict != VINF_SUCCESS)
4277 return rcStrict;
4278 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4279 }
4280
4281 /* commit */
4282 *pSel = uSel;
4283 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4284 pHid->u32Limit = cbLimit;
4285 pHid->u64Base = u64Base;
4286 pHid->ValidSel = uSel;
4287 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4288
4289 /** @todo check if the hidden bits are loaded correctly for 64-bit
4290 * mode. */
4291 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4292
4293 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4294 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4295 return VINF_SUCCESS;
4296}
4297
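/*
 * Editor's note: a tiny sketch of the real/V8086 mode fast path at the top of
 * iemCImpl_LoadSReg above, where the hidden base is simply the selector shifted
 * left by four.  Illustrative only; the helper names are made up.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

static uint32_t realModeSegBase(uint16_t uSel)
{
    return (uint32_t)uSel << 4;                 /* base = selector * 16      */
}

static uint32_t realModeLinear(uint16_t uSel, uint16_t uOff)
{
    return realModeSegBase(uSel) + uOff;        /* e.g. F000:FFF0 -> 0xFFFF0 */
}
#endif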
4298
4299/**
4300 * Implements 'mov SReg, r/m'.
4301 *
4302 * @param iSegReg The segment register number (valid).
4303 * @param uSel The new selector value.
4304 */
4305IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4306{
4307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4308 if (rcStrict == VINF_SUCCESS)
4309 {
4310 if (iSegReg == X86_SREG_SS)
4311 {
4312 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4313 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4314 }
4315 }
4316 return rcStrict;
4317}
4318
4319
4320/**
4321 * Implements 'pop SReg'.
4322 *
4323 * @param iSegReg The segment register number (valid).
4324 * @param enmEffOpSize The efficient operand size (valid).
4325 */
4326IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4327{
4328 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4329 VBOXSTRICTRC rcStrict;
4330
4331 /*
4332 * Read the selector off the stack and join paths with mov ss, reg.
4333 */
4334 RTUINT64U TmpRsp;
4335 TmpRsp.u = pCtx->rsp;
4336 switch (enmEffOpSize)
4337 {
4338 case IEMMODE_16BIT:
4339 {
4340 uint16_t uSel;
4341 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4342 if (rcStrict == VINF_SUCCESS)
4343 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4344 break;
4345 }
4346
4347 case IEMMODE_32BIT:
4348 {
4349 uint32_t u32Value;
4350 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4351 if (rcStrict == VINF_SUCCESS)
4352 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4353 break;
4354 }
4355
4356 case IEMMODE_64BIT:
4357 {
4358 uint64_t u64Value;
4359 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4360 if (rcStrict == VINF_SUCCESS)
4361 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4362 break;
4363 }
4364 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4365 }
4366
4367 /*
4368 * Commit the stack on success.
4369 */
4370 if (rcStrict == VINF_SUCCESS)
4371 {
4372 pCtx->rsp = TmpRsp.u;
4373 if (iSegReg == X86_SREG_SS)
4374 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4375 }
4376 return rcStrict;
4377}
4378
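/*
 * Editor's note: the sketch below illustrates the commit pattern used by
 * iemCImpl_pop_Sreg above -- pop through a scratch copy of RSP so that a
 * faulting segment load leaves the architectural stack pointer untouched.
 * The toy types and callbacks are invented for the sketch.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

typedef struct TOYCPU
{
    uint64_t        uRsp;
    const uint16_t *pau16Stack;
} TOYCPU;

static int toyPopU16(TOYCPU *pCpu, uint16_t *puValue)       /* 0 = success */
{
    *puValue = pCpu->pau16Stack[pCpu->uRsp / sizeof(uint16_t)];
    pCpu->uRsp += sizeof(uint16_t);
    return 0;
}

static int toyPopSReg(TOYCPU *pCpu, int (*pfnLoadSReg)(uint16_t uSel))
{
    TOYCPU   Tmp = *pCpu;                   /* work on a copy of the stack pointer */
    uint16_t uSel;
    int rc = toyPopU16(&Tmp, &uSel);
    if (rc == 0)
        rc = pfnLoadSReg(uSel);             /* may fail / "fault"                  */
    if (rc == 0)
        pCpu->uRsp = Tmp.uRsp;              /* commit the pop only on success      */
    return rc;
}
#endif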
4379
4380/**
4381 * Implements lgs, lfs, les, lds & lss.
4382 */
4383IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4384 uint16_t, uSel,
4385 uint64_t, offSeg,
4386 uint8_t, iSegReg,
4387 uint8_t, iGReg,
4388 IEMMODE, enmEffOpSize)
4389{
4390 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4391 VBOXSTRICTRC rcStrict;
4392
4393 /*
4394 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4395 */
4396 /** @todo verify and test that mov, pop and lXs handle the segment
4397 * register loading in the exact same way. */
4398 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4399 if (rcStrict == VINF_SUCCESS)
4400 {
4401 switch (enmEffOpSize)
4402 {
4403 case IEMMODE_16BIT:
4404 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4405 break;
4406 case IEMMODE_32BIT:
4407 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4408 break;
4409 case IEMMODE_64BIT:
4410 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4411 break;
4412 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4413 }
4414 }
4415
4416 return rcStrict;
4417}
4418
4419
4420/**
4421 * Helper for VERR, VERW, LAR, and LSL; fetches the descriptor for the given selector.
4422 *
4423 * @retval VINF_SUCCESS on success.
4424 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4425 * @retval iemMemFetchSysU64 return value.
4426 *
4427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4428 * @param uSel The selector value.
4429 * @param fAllowSysDesc Whether system descriptors are OK or not.
4430 * @param pDesc Where to return the descriptor on success.
4431 */
4432static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPU pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4433{
4434 pDesc->Long.au64[0] = 0;
4435 pDesc->Long.au64[1] = 0;
4436
4437 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4438 return VINF_IEM_SELECTOR_NOT_OK;
4439
4440 /* Within the table limits? */
4441 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4442 RTGCPTR GCPtrBase;
4443 if (uSel & X86_SEL_LDT)
4444 {
4445 if ( !pCtx->ldtr.Attr.n.u1Present
4446 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4447 return VINF_IEM_SELECTOR_NOT_OK;
4448 GCPtrBase = pCtx->ldtr.u64Base;
4449 }
4450 else
4451 {
4452 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4453 return VINF_IEM_SELECTOR_NOT_OK;
4454 GCPtrBase = pCtx->gdtr.pGdt;
4455 }
4456
4457 /* Fetch the descriptor. */
4458 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4459 if (rcStrict != VINF_SUCCESS)
4460 return rcStrict;
4461 if (!pDesc->Legacy.Gen.u1DescType)
4462 {
4463 if (!fAllowSysDesc)
4464 return VINF_IEM_SELECTOR_NOT_OK;
4465 if (CPUMIsGuestInLongModeEx(pCtx))
4466 {
4467 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4468 if (rcStrict != VINF_SUCCESS)
4469 return rcStrict;
4470 }
4471
4472 }
4473
4474 return VINF_SUCCESS;
4475}
4476
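/*
 * Editor's note: an illustrative breakdown of how the helper above turns a
 * selector into a descriptor address -- bits 3..15 index 8-byte entries, bit 2
 * (TI) selects LDT vs GDT and bits 0..1 are the RPL.  Not part of the sources.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

static uint64_t descriptorAddress(uint16_t uSel, uint64_t uGdtBase, uint64_t uLdtBase)
{
    uint64_t uTableBase = (uSel & 0x0004) ? uLdtBase : uGdtBase;    /* TI bit       */
    return uTableBase + (uSel & 0xfff8);                            /* index * 8    */
}

static unsigned selectorRpl(uint16_t uSel)
{
    return uSel & 0x3;                                              /* requested PL */
}
#endif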
4477
4478/**
4479 * Implements verr (fWrite = false) and verw (fWrite = true).
4480 */
4481IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4482{
4483 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4484
4485 /** @todo figure whether the accessed bit is set or not. */
4486
4487 bool fAccessible = true;
4488 IEMSELDESC Desc;
4489 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4490 if (rcStrict == VINF_SUCCESS)
4491 {
4492 /* Check the descriptor, order doesn't matter much here. */
4493 if ( !Desc.Legacy.Gen.u1DescType
4494 || !Desc.Legacy.Gen.u1Present)
4495 fAccessible = false;
4496 else
4497 {
4498 if ( fWrite
4499 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4500 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4501 fAccessible = false;
4502
4503 /** @todo testcase for the conforming behavior. */
4504 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4505 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4506 {
4507 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4508 fAccessible = false;
4509 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4510 fAccessible = false;
4511 }
4512 }
4513
4514 }
4515 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4516 fAccessible = false;
4517 else
4518 return rcStrict;
4519
4520 /* commit */
4521 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fAccessible;
4522
4523 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4524 return VINF_SUCCESS;
4525}
4526
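/*
 * Editor's note: a rough stand-alone model of the accessibility test VERR/VERW
 * perform above; the only architectural output is ZF.  Not part of the sources;
 * it assumes the usual descriptor type encoding (bit 3 = code, bit 2 =
 * conforming/expand-down, bit 1 = read/write).
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>
#include <stdbool.h>

static bool verxAccessible(uint8_t u4Type, bool fPresent, bool fSystem,
                           unsigned uRpl, unsigned uCpl, unsigned uDpl, bool fWrite)
{
    if (fSystem || !fPresent)
        return false;
    bool const fCode = (u4Type & 0x8) != 0;
    if (fWrite ? (fCode || !(u4Type & 0x2))     /* VERW: needs writable data     */
               : (fCode && !(u4Type & 0x2)))    /* VERR: execute-only code fails */
        return false;
    if (!(fCode && (u4Type & 0x4)))             /* non-conforming: check levels  */
        if (uRpl > uDpl || uCpl > uDpl)
            return false;
    return true;
}
#endif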
4527
4528/**
4529 * Implements LAR and LSL with 64-bit operand size.
4530 *
4531 * @returns VINF_SUCCESS.
4532 * @param pu64Dst Pointer to the destination register.
4533 * @param uSel The selector to load details for.
4534 * @param fIsLar true = LAR, false = LSL.
4535 */
4536IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4537{
4538 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4539
4540 /** @todo figure whether the accessed bit is set or not. */
4541
4542 bool fDescOk = true;
4543 IEMSELDESC Desc;
4544 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4545 if (rcStrict == VINF_SUCCESS)
4546 {
4547 /*
4548 * Check the descriptor type.
4549 */
4550 if (!Desc.Legacy.Gen.u1DescType)
4551 {
4552 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4553 {
4554 if (Desc.Long.Gen.u5Zeros)
4555 fDescOk = false;
4556 else
4557 switch (Desc.Long.Gen.u4Type)
4558 {
4559 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4560 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4561 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4562 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4563 break;
4564 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4565 fDescOk = fIsLar;
4566 break;
4567 default:
4568 fDescOk = false;
4569 break;
4570 }
4571 }
4572 else
4573 {
4574 switch (Desc.Long.Gen.u4Type)
4575 {
4576 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4577 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4578 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4579 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4580 case X86_SEL_TYPE_SYS_LDT:
4581 break;
4582 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4583 case X86_SEL_TYPE_SYS_TASK_GATE:
4584 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4585 fDescOk = fIsLar;
4586 break;
4587 default:
4588 fDescOk = false;
4589 break;
4590 }
4591 }
4592 }
4593 if (fDescOk)
4594 {
4595 /*
4596 * Check the RPL/DPL/CPL interaction.
4597 */
4598 /** @todo testcase for the conforming behavior. */
4599 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4600 || !Desc.Legacy.Gen.u1DescType)
4601 {
4602 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4603 fDescOk = false;
4604 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4605 fDescOk = false;
4606 }
4607 }
4608
4609 if (fDescOk)
4610 {
4611 /*
4612 * All fine, start committing the result.
4613 */
4614 if (fIsLar)
4615 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4616 else
4617 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4618 }
4619
4620 }
4621 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4622 fDescOk = false;
4623 else
4624 return rcStrict;
4625
4626 /* commit flags value and advance rip. */
4627 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fDescOk;
4628 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4629
4630 return VINF_SUCCESS;
4631}
4632
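/*
 * Editor's note: a small sketch of what the two instructions hand back once the
 * checks above pass -- LAR returns the descriptor's second dword with the low
 * and top bytes masked off, LSL returns the granularity-expanded limit.  Not
 * part of the sources; helper names are made up.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>

static uint64_t larResult(uint32_t uDescDword1)
{
    return uDescDword1 & UINT32_C(0x00ffff00);              /* access rights bytes */
}

static uint32_t lslResult(uint32_t uRawLimit20, int fGranularity4K)
{
    return fGranularity4K ? (uRawLimit20 << 12) | UINT32_C(0xfff) : uRawLimit20;
}
#endif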
4633
4634/**
4635 * Implements LAR and LSL with 16-bit operand size.
4636 *
4637 * @returns VINF_SUCCESS.
4638 * @param pu16Dst Pointer to the destination register.
4639 * @param uSel The selector to load details for.
4640 * @param fIsLar true = LAR, false = LSL.
4641 */
4642IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
4643{
4644 uint64_t u64TmpDst = *pu16Dst;
4645 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
4646 *pu16Dst = u64TmpDst;
4647 return VINF_SUCCESS;
4648}
4649
4650
4651/**
4652 * Implements lgdt.
4653 *
4654 * @param iEffSeg The segment of the new gdtr contents
4655 * @param GCPtrEffSrc The address of the new gdtr contents.
4656 * @param enmEffOpSize The effective operand size.
4657 */
4658IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4659{
4660 if (pVCpu->iem.s.uCpl != 0)
4661 return iemRaiseGeneralProtectionFault0(pVCpu);
4662 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4663
4664 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
4665 {
4666 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
4667 IEM_SVM_UPDATE_NRIP(pVCpu);
4668 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4669 }
4670
4671 /*
4672 * Fetch the limit and base address.
4673 */
4674 uint16_t cbLimit;
4675 RTGCPTR GCPtrBase;
4676 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4677 if (rcStrict == VINF_SUCCESS)
4678 {
4679 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4680 || X86_IS_CANONICAL(GCPtrBase))
4681 {
4682 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4683 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4684 else
4685 {
4686 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4687 pCtx->gdtr.cbGdt = cbLimit;
4688 pCtx->gdtr.pGdt = GCPtrBase;
4689 }
4690 if (rcStrict == VINF_SUCCESS)
4691 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4692 }
4693 else
4694 {
4695 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4696 return iemRaiseGeneralProtectionFault0(pVCpu);
4697 }
4698 }
4699 return rcStrict;
4700}
4701
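/*
 * Editor's note: a sketch of the memory operand LGDT/LIDT read via
 * iemMemFetchDataXdtr above -- a 16-bit limit followed by the base address
 * (32 bits outside long mode, of which the 16-bit operand form only uses 24,
 * and 64 bits in long mode).  Illustrative only; the decoder is hypothetical.
 */
#if 0 /* illustrative sketch, never built */
#include <stdint.h>
#include <string.h>

static void decodeXdtrOperand(const uint8_t *pbMem, int f64Bit,
                              uint16_t *pcbLimit, uint64_t *puBase)
{
    memcpy(pcbLimit, pbMem, sizeof(uint16_t));
    if (f64Bit)
        memcpy(puBase, pbMem + 2, sizeof(uint64_t));
    else
    {
        uint32_t uBase32;
        memcpy(&uBase32, pbMem + 2, sizeof(uint32_t));
        *puBase = uBase32;
    }
}
#endif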
4702
4703/**
4704 * Implements sgdt.
4705 *
4706 * @param iEffSeg The segment where to store the gdtr content.
4707 * @param GCPtrEffDst The address where to store the gdtr content.
4708 */
4709IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4710{
4711 /*
4712 * Join paths with sidt.
4713 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4714 * you really must know.
4715 */
4716 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
4717 {
4718 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
4719 IEM_SVM_UPDATE_NRIP(pVCpu);
4720 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4721 }
4722
4723 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4724 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4725 if (rcStrict == VINF_SUCCESS)
4726 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4727 return rcStrict;
4728}
4729
4730
4731/**
4732 * Implements lidt.
4733 *
4734 * @param iEffSeg The segment of the new idtr contents
4735 * @param GCPtrEffSrc The address of the new idtr contents.
4736 * @param enmEffOpSize The effective operand size.
4737 */
4738IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4739{
4740 if (pVCpu->iem.s.uCpl != 0)
4741 return iemRaiseGeneralProtectionFault0(pVCpu);
4742 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4743
4744 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
4745 {
4746 Log(("lidt: Guest intercept -> #VMEXIT\n"));
4747 IEM_SVM_UPDATE_NRIP(pVCpu);
4748 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4749 }
4750
4751 /*
4752 * Fetch the limit and base address.
4753 */
4754 uint16_t cbLimit;
4755 RTGCPTR GCPtrBase;
4756 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4757 if (rcStrict == VINF_SUCCESS)
4758 {
4759 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4760 || X86_IS_CANONICAL(GCPtrBase))
4761 {
4762 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4763 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4764 else
4765 {
4766 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4767 pCtx->idtr.cbIdt = cbLimit;
4768 pCtx->idtr.pIdt = GCPtrBase;
4769 }
4770 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4771 }
4772 else
4773 {
4774 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4775 return iemRaiseGeneralProtectionFault0(pVCpu);
4776 }
4777 }
4778 return rcStrict;
4779}
4780
4781
4782/**
4783 * Implements sidt.
4784 *
4785 * @param iEffSeg The segment where to store the idtr content.
4786 * @param GCPtrEffDst The address where to store the idtr content.
4787 */
4788IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4789{
4790 /*
4791 * Join paths with sgdt.
4792 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4793 * you really must know.
4794 */
4795 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
4796 {
4797 Log(("sidt: Guest intercept -> #VMEXIT\n"));
4798 IEM_SVM_UPDATE_NRIP(pVCpu);
4799 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4800 }
4801
4802 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4803 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4804 if (rcStrict == VINF_SUCCESS)
4805 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4806 return rcStrict;
4807}
4808
4809
4810/**
4811 * Implements lldt.
4812 *
4813 * @param uNewLdt The new LDT selector value.
4814 */
4815IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4816{
4817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4818
4819 /*
4820 * Check preconditions.
4821 */
4822 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4823 {
4824 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4825 return iemRaiseUndefinedOpcode(pVCpu);
4826 }
4827 if (pVCpu->iem.s.uCpl != 0)
4828 {
4829 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
4830 return iemRaiseGeneralProtectionFault0(pVCpu);
4831 }
4832 if (uNewLdt & X86_SEL_LDT)
4833 {
4834 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4835 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
4836 }
4837
4838 /*
4839 * Now, loading a NULL selector is easy.
4840 */
4841 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4842 {
4843 /* Nested-guest SVM intercept. */
4844 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4845 {
4846 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4847 IEM_SVM_UPDATE_NRIP(pVCpu);
4848 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4849 }
4850
4851 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4852 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4853 CPUMSetGuestLDTR(pVCpu, uNewLdt);
4854 else
4855 pCtx->ldtr.Sel = uNewLdt;
4856 pCtx->ldtr.ValidSel = uNewLdt;
4857 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4858 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4859 {
4860 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4861 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4862 }
4863 else if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4864 {
4865 /* AMD-V seems to leave the base and limit alone. */
4866 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4867 }
4868 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4869 {
4870 /* VT-x (Intel 3960x) seems to be doing the following. */
4871 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4872 pCtx->ldtr.u64Base = 0;
4873 pCtx->ldtr.u32Limit = UINT32_MAX;
4874 }
4875
4876 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4877 return VINF_SUCCESS;
4878 }
4879
4880 /*
4881 * Read the descriptor.
4882 */
4883 IEMSELDESC Desc;
4884 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4885 if (rcStrict != VINF_SUCCESS)
4886 return rcStrict;
4887
4888 /* Check GPs first. */
4889 if (Desc.Legacy.Gen.u1DescType)
4890 {
4891 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4892 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4893 }
4894 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4895 {
4896 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4897 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4898 }
4899 uint64_t u64Base;
4900 if (!IEM_IS_LONG_MODE(pVCpu))
4901 u64Base = X86DESC_BASE(&Desc.Legacy);
4902 else
4903 {
4904 if (Desc.Long.Gen.u5Zeros)
4905 {
4906 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4907 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4908 }
4909
4910 u64Base = X86DESC64_BASE(&Desc.Long);
4911 if (!IEM_IS_CANONICAL(u64Base))
4912 {
4913 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4914 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4915 }
4916 }
4917
4918 /* NP */
4919 if (!Desc.Legacy.Gen.u1Present)
4920 {
4921 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4922 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
4923 }
4924
4925 /* Nested-guest SVM intercept. */
4926 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4927 {
4928 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4929 IEM_SVM_UPDATE_NRIP(pVCpu);
4930 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4931 }
4932
4933 /*
4934 * It checks out alright, update the registers.
4935 */
4936/** @todo check if the actual value is loaded or if the RPL is dropped */
4937 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4938 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4939 else
4940 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4941 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4942 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4943 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4944 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4945 pCtx->ldtr.u64Base = u64Base;
4946
4947 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4948 return VINF_SUCCESS;
4949}
4950
4951
4952/**
4953 * Implements ltr.
4954 *
4955 * @param uNewTr The new task register (TR) selector value.
4956 */
4957IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4958{
4959 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4960
4961 /*
4962 * Check preconditions.
4963 */
4964 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4965 {
4966 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
4967 return iemRaiseUndefinedOpcode(pVCpu);
4968 }
4969 if (pVCpu->iem.s.uCpl != 0)
4970 {
4971 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
4972 return iemRaiseGeneralProtectionFault0(pVCpu);
4973 }
4974 if (uNewTr & X86_SEL_LDT)
4975 {
4976 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4977 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
4978 }
4979 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4980 {
4981 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4982 return iemRaiseGeneralProtectionFault0(pVCpu);
4983 }
4984 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
4985 {
4986 Log(("ltr: Guest intercept -> #VMEXIT\n"));
4987 IEM_SVM_UPDATE_NRIP(pVCpu);
4988 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4989 }
4990
4991 /*
4992 * Read the descriptor.
4993 */
4994 IEMSELDESC Desc;
4995 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
4996 if (rcStrict != VINF_SUCCESS)
4997 return rcStrict;
4998
4999 /* Check GPs first. */
5000 if (Desc.Legacy.Gen.u1DescType)
5001 {
5002 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5003 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5004 }
5005 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5006 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5007 || IEM_IS_LONG_MODE(pVCpu)) )
5008 {
5009 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5010 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5011 }
5012 uint64_t u64Base;
5013 if (!IEM_IS_LONG_MODE(pVCpu))
5014 u64Base = X86DESC_BASE(&Desc.Legacy);
5015 else
5016 {
5017 if (Desc.Long.Gen.u5Zeros)
5018 {
5019 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5020 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5021 }
5022
5023 u64Base = X86DESC64_BASE(&Desc.Long);
5024 if (!IEM_IS_CANONICAL(u64Base))
5025 {
5026 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5027 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5028 }
5029 }
5030
5031 /* NP */
5032 if (!Desc.Legacy.Gen.u1Present)
5033 {
5034 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5035 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5036 }
5037
5038 /*
5039 * Set it busy.
5040 * Note! Intel says this should lock down the whole descriptor, but we'll
5041     *       restrict ourselves to 32-bit for now due to lack of inline
5042 * assembly and such.
5043 */
5044 void *pvDesc;
5045 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
5046 if (rcStrict != VINF_SUCCESS)
5047 return rcStrict;
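    /* Note: the TSS busy bit is descriptor bit 41, i.e. bit 1 of the type byte at
       offset 5.  iemMemMap may hand back a pointer that is not 32-bit aligned, so
       the switch below rebases the pointer and adjusts the bit index so the atomic
       bit-set still hits that same bit. */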
5048 switch ((uintptr_t)pvDesc & 3)
5049 {
5050 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5051 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5052 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5053 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5054 }
5055 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5056 if (rcStrict != VINF_SUCCESS)
5057 return rcStrict;
5058 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5059
5060 /*
5061 * It checks out alright, update the registers.
5062 */
5063/** @todo check if the actual value is loaded or if the RPL is dropped */
5064 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5065 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5066 else
5067 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
5068 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5069 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
5070 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5071 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5072 pCtx->tr.u64Base = u64Base;
5073
5074 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5075 return VINF_SUCCESS;
5076}
5077
5078
5079/**
5080 * Implements mov GReg,CRx.
5081 *
5082 * @param iGReg The general register to store the CRx value in.
5083 * @param iCrReg The CRx register to read (valid).
5084 */
5085IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5086{
5087 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5088 if (pVCpu->iem.s.uCpl != 0)
5089 return iemRaiseGeneralProtectionFault0(pVCpu);
5090 Assert(!pCtx->eflags.Bits.u1VM);
5091
5092 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5093 {
5094 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5095 IEM_SVM_UPDATE_NRIP(pVCpu);
5096 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5097 }
5098
5099 /* read it */
5100 uint64_t crX;
5101 switch (iCrReg)
5102 {
5103 case 0:
5104 crX = pCtx->cr0;
5105 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5106 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5107 break;
5108 case 2: crX = pCtx->cr2; break;
5109 case 3: crX = pCtx->cr3; break;
5110 case 4: crX = pCtx->cr4; break;
5111 case 8:
5112 {
5113#ifdef VBOX_WITH_NESTED_HWVIRT
5114 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5115 {
5116 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
5117 if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
5118 {
5119 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5120 break;
5121 }
5122 }
5123#endif
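            /* CR8 mirrors TPR bits 7:4, so the 8-bit APIC TPR is shifted down by four
               here (and shifted back up by four when CR8 is written in iemCImpl_load_CrX). */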
5124 uint8_t uTpr;
5125 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5126 if (RT_SUCCESS(rc))
5127 crX = uTpr >> 4;
5128 else
5129 crX = 0;
5130 break;
5131 }
5132 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5133 }
5134
5135 /* store it */
5136 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5137 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5138 else
5139 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5140
5141 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5142 return VINF_SUCCESS;
5143}
5144
5145
5146/**
5147 * Used to implement 'mov CRx,GReg', 'lmsw r/m16' and 'clts'.
5148 *
5149 * @param iCrReg The CRx register to write (valid).
5150 * @param uNewCrX The new value.
5151 * @param   enmAccessCrX    The instruction that caused the CrX load.
5152 * @param iGReg The general register in case of a 'mov CRx,GReg'
5153 * instruction.
5154 */
5155IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5156{
5157 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5158 VBOXSTRICTRC rcStrict;
5159 int rc;
5160#ifndef VBOX_WITH_NESTED_HWVIRT
5161 RT_NOREF2(iGReg, enmAccessCrX);
5162#endif
5163
5164 /*
5165 * Try store it.
5166 * Unfortunately, CPUM only does a tiny bit of the work.
5167 */
5168 switch (iCrReg)
5169 {
5170 case 0:
5171 {
5172 /*
5173 * Perform checks.
5174 */
5175 uint64_t const uOldCrX = pCtx->cr0;
5176 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
5177 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
5178 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
5179
5180 /* ET is hardcoded on 486 and later. */
5181 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5182 uNewCrX |= X86_CR0_ET;
5183 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5184 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5185 {
5186 uNewCrX &= fValid;
5187 uNewCrX |= X86_CR0_ET;
5188 }
5189 else
5190 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5191
5192 /* Check for reserved bits. */
5193 if (uNewCrX & ~(uint64_t)fValid)
5194 {
5195 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5196 return iemRaiseGeneralProtectionFault0(pVCpu);
5197 }
5198
5199 /* Check for invalid combinations. */
5200 if ( (uNewCrX & X86_CR0_PG)
5201 && !(uNewCrX & X86_CR0_PE) )
5202 {
5203 Log(("Trying to set CR0.PG without CR0.PE\n"));
5204 return iemRaiseGeneralProtectionFault0(pVCpu);
5205 }
5206
5207 if ( !(uNewCrX & X86_CR0_CD)
5208 && (uNewCrX & X86_CR0_NW) )
5209 {
5210 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5211 return iemRaiseGeneralProtectionFault0(pVCpu);
5212 }
5213
5214 if ( !(uNewCrX & X86_CR0_PG)
5215 && (pCtx->cr4 & X86_CR4_PCIDE))
5216 {
5217 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5218 return iemRaiseGeneralProtectionFault0(pVCpu);
5219 }
5220
5221 /* Long mode consistency checks. */
5222 if ( (uNewCrX & X86_CR0_PG)
5223 && !(uOldCrX & X86_CR0_PG)
5224 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5225 {
5226 if (!(pCtx->cr4 & X86_CR4_PAE))
5227 {
5228                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5229 return iemRaiseGeneralProtectionFault0(pVCpu);
5230 }
5231 if (pCtx->cs.Attr.n.u1Long)
5232 {
5233                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5234 return iemRaiseGeneralProtectionFault0(pVCpu);
5235 }
5236 }
5237
5238 /** @todo check reserved PDPTR bits as AMD states. */
5239
5240 /*
5241 * SVM nested-guest CR0 write intercepts.
5242 */
5243 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5244 {
5245 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5246 IEM_SVM_UPDATE_NRIP(pVCpu);
5247 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5248 }
5249 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5250 {
5251 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5252 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5253 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5254 {
5255 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5256 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5257 IEM_SVM_UPDATE_NRIP(pVCpu);
5258 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5259 }
5260 }
5261
5262 /*
5263 * Change CR0.
5264 */
5265 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5266 CPUMSetGuestCR0(pVCpu, uNewCrX);
5267 else
5268 pCtx->cr0 = uNewCrX;
5269 Assert(pCtx->cr0 == uNewCrX);
5270
5271 /*
5272 * Change EFER.LMA if entering or leaving long mode.
5273 */
5274 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5275 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5276 {
5277 uint64_t NewEFER = pCtx->msrEFER;
5278 if (uNewCrX & X86_CR0_PG)
5279 NewEFER |= MSR_K6_EFER_LMA;
5280 else
5281 NewEFER &= ~MSR_K6_EFER_LMA;
5282
5283 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5284 CPUMSetGuestEFER(pVCpu, NewEFER);
5285 else
5286 pCtx->msrEFER = NewEFER;
5287 Assert(pCtx->msrEFER == NewEFER);
5288 }
5289
5290 /*
5291 * Inform PGM.
5292 */
5293 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5294 {
5295 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
5296 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
5297 {
5298 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5299 AssertRCReturn(rc, rc);
5300 /* ignore informational status codes */
5301 }
5302 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5303 }
5304 else
5305 rcStrict = VINF_SUCCESS;
5306
5307#ifdef IN_RC
5308 /* Return to ring-3 for rescheduling if WP or AM changes. */
5309 if ( rcStrict == VINF_SUCCESS
5310 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
5311 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
5312 rcStrict = VINF_EM_RESCHEDULE;
5313#endif
5314 break;
5315 }
5316
5317 /*
5318 * CR2 can be changed without any restrictions.
5319 */
5320 case 2:
5321 {
5322 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5323 {
5324 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5325 IEM_SVM_UPDATE_NRIP(pVCpu);
5326 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5327 }
5328 pCtx->cr2 = uNewCrX;
5329 rcStrict = VINF_SUCCESS;
5330 break;
5331 }
5332
5333 /*
5334 * CR3 is relatively simple, although AMD and Intel have different
5335         * accounts of how setting reserved bits is handled.  We take Intel's
5336         * word for the lower bits and AMD's for the high bits (63:52).  The
5337 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5338 * on this.
5339 */
5340 /** @todo Testcase: Setting reserved bits in CR3, especially before
5341 * enabling paging. */
5342 case 3:
5343 {
5344            /* Bit 63 set (with CR4.PCIDE) means no TLB invalidation is required; clear it from the source operand. */
5345 if ( (pCtx->cr4 & X86_CR4_PCIDE)
5346 && (uNewCrX & RT_BIT_64(63)))
5347 {
5348 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
5349 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
5350 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
5351 * Paging-Structure Caches". */
5352 uNewCrX &= ~RT_BIT_64(63);
5353 }
5354
5355 /* check / mask the value. */
5356 if (uNewCrX & UINT64_C(0xfff0000000000000))
5357 {
5358 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5359 return iemRaiseGeneralProtectionFault0(pVCpu);
5360 }
5361
5362 uint64_t fValid;
5363 if ( (pCtx->cr4 & X86_CR4_PAE)
5364 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5365 fValid = UINT64_C(0x000fffffffffffff);
5366 else
5367 fValid = UINT64_C(0xffffffff);
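            /* Note: with CR4.PAE and EFER.LME set, CR3 may architecturally hold a
               physical address of up to 52 bits; otherwise only the low 32 bits are
               meaningful and anything above is silently dropped below. */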
5368 if (uNewCrX & ~fValid)
5369 {
5370 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5371 uNewCrX, uNewCrX & ~fValid));
5372 uNewCrX &= fValid;
5373 }
5374
5375 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
5376 {
5377 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5378 IEM_SVM_UPDATE_NRIP(pVCpu);
5379 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
5380 }
5381
5382 /** @todo If we're in PAE mode we should check the PDPTRs for
5383 * invalid bits. */
5384
5385 /* Make the change. */
5386 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5387 {
5388 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5389 AssertRCSuccessReturn(rc, rc);
5390 }
5391 else
5392 pCtx->cr3 = uNewCrX;
5393
5394 /* Inform PGM. */
5395 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5396 {
5397 if (pCtx->cr0 & X86_CR0_PG)
5398 {
5399 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5400 AssertRCReturn(rc, rc);
5401 /* ignore informational status codes */
5402 }
5403 }
5404 rcStrict = VINF_SUCCESS;
5405 break;
5406 }
5407
5408 /*
5409 * CR4 is a bit more tedious as there are bits which cannot be cleared
5410 * under some circumstances and such.
5411 */
5412 case 4:
5413 {
5414 uint64_t const uOldCrX = pCtx->cr4;
5415
5416 /** @todo Shouldn't this look at the guest CPUID bits to determine
5417 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5418 * should #GP(0). */
5419 /* reserved bits */
5420 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5421 | X86_CR4_TSD | X86_CR4_DE
5422 | X86_CR4_PSE | X86_CR4_PAE
5423 | X86_CR4_MCE | X86_CR4_PGE
5424 | X86_CR4_PCE | X86_CR4_OSFXSR
5425 | X86_CR4_OSXMMEEXCPT;
5426 //if (xxx)
5427 // fValid |= X86_CR4_VMXE;
5428 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
5429 fValid |= X86_CR4_OSXSAVE;
5430 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fPcid)
5431 fValid |= X86_CR4_PCIDE;
5432 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase)
5433 fValid |= X86_CR4_FSGSBASE;
5434 if (uNewCrX & ~(uint64_t)fValid)
5435 {
5436 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5437 return iemRaiseGeneralProtectionFault0(pVCpu);
5438 }
5439
5440 bool const fPcide = ((uNewCrX ^ uOldCrX) & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
5441 bool const fLongMode = CPUMIsGuestInLongModeEx(pCtx);
5442
5443 /* PCIDE check. */
5444 if ( fPcide
5445 && ( !fLongMode
5446 || (pCtx->cr3 & UINT64_C(0xfff))))
5447 {
5448 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pCtx->cr3 & UINT64_C(0xfff))));
5449 return iemRaiseGeneralProtectionFault0(pVCpu);
5450 }
5451
5452 /* PAE check. */
5453 if ( fLongMode
5454 && (uOldCrX & X86_CR4_PAE)
5455 && !(uNewCrX & X86_CR4_PAE))
5456 {
5457                Log(("Trying to clear CR4.PAE while long mode is active\n"));
5458 return iemRaiseGeneralProtectionFault0(pVCpu);
5459 }
5460
5461 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
5462 {
5463 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5464 IEM_SVM_UPDATE_NRIP(pVCpu);
5465 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
5466 }
5467
5468 /*
5469 * Change it.
5470 */
5471 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5472 {
5473 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5474 AssertRCSuccessReturn(rc, rc);
5475 }
5476 else
5477 pCtx->cr4 = uNewCrX;
5478 Assert(pCtx->cr4 == uNewCrX);
5479
5480 /*
5481 * Notify SELM and PGM.
5482 */
5483 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5484 {
5485 /* SELM - VME may change things wrt to the TSS shadowing. */
5486 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5487 {
5488 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5489 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5490#ifdef VBOX_WITH_RAW_MODE
5491 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
5492 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5493#endif
5494 }
5495
5496 /* PGM - flushing and mode. */
5497 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
5498 {
5499 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5500 AssertRCReturn(rc, rc);
5501 /* ignore informational status codes */
5502 }
5503 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5504 }
5505 else
5506 rcStrict = VINF_SUCCESS;
5507 break;
5508 }
5509
5510 /*
5511 * CR8 maps to the APIC TPR.
5512 */
5513 case 8:
5514 {
5515 if (uNewCrX & ~(uint64_t)0xf)
5516 {
5517 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5518 return iemRaiseGeneralProtectionFault0(pVCpu);
5519 }
5520
5521#ifdef VBOX_WITH_NESTED_HWVIRT
5522 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5523 {
5524 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
5525 {
5526 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5527 IEM_SVM_UPDATE_NRIP(pVCpu);
5528 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
5529 }
5530
5531 PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
5532 pVmcbCtrl->IntCtrl.n.u8VTPR = uNewCrX;
5533 if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
5534 {
5535 rcStrict = VINF_SUCCESS;
5536 break;
5537 }
5538 }
5539#endif
5540 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5541 {
5542 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
5543 APICSetTpr(pVCpu, u8Tpr);
5544 }
5545 rcStrict = VINF_SUCCESS;
5546 break;
5547 }
5548
5549 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5550 }
5551
5552 /*
5553 * Advance the RIP on success.
5554 */
5555 if (RT_SUCCESS(rcStrict))
5556 {
5557 if (rcStrict != VINF_SUCCESS)
5558 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5559 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5560 }
5561
5562 return rcStrict;
5563}
5564
5565
5566/**
5567 * Implements mov CRx,GReg.
5568 *
5569 * @param iCrReg The CRx register to write (valid).
5570 * @param iGReg The general register to load the DRx value from.
5571 */
5572IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5573{
5574 if (pVCpu->iem.s.uCpl != 0)
5575 return iemRaiseGeneralProtectionFault0(pVCpu);
5576 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5577
5578 /*
5579 * Read the new value from the source register and call common worker.
5580 */
5581 uint64_t uNewCrX;
5582 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5583 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
5584 else
5585 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
5586 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
5587}
5588
5589
5590/**
5591 * Implements 'LMSW r/m16'
5592 *
5593 * @param u16NewMsw The new value.
5594 */
5595IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5596{
5597 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5598
5599 if (pVCpu->iem.s.uCpl != 0)
5600 return iemRaiseGeneralProtectionFault0(pVCpu);
5601 Assert(!pCtx->eflags.Bits.u1VM);
5602
5603 /*
5604 * Compose the new CR0 value and call common worker.
5605 */
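    /* Note: the old CR0.PE is deliberately kept, so LMSW can set but never clear
       the PE bit, matching the architectural behaviour of the instruction.  Only
       MP, EM and TS are replaced from the new machine status word. */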
5606 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5607 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5608 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
5609}
5610
5611
5612/**
5613 * Implements 'CLTS'.
5614 */
5615IEM_CIMPL_DEF_0(iemCImpl_clts)
5616{
5617 if (pVCpu->iem.s.uCpl != 0)
5618 return iemRaiseGeneralProtectionFault0(pVCpu);
5619
5620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5621 uint64_t uNewCr0 = pCtx->cr0;
5622 uNewCr0 &= ~X86_CR0_TS;
5623 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
5624}
5625
5626
5627/**
5628 * Implements mov GReg,DRx.
5629 *
5630 * @param iGReg The general register to store the DRx value in.
5631 * @param iDrReg The DRx register to read (0-7).
5632 */
5633IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5634{
5635 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5636
5637 /*
5638 * Check preconditions.
5639 */
5640
5641 /* Raise GPs. */
5642 if (pVCpu->iem.s.uCpl != 0)
5643 return iemRaiseGeneralProtectionFault0(pVCpu);
5644 Assert(!pCtx->eflags.Bits.u1VM);
5645
5646 if ( (iDrReg == 4 || iDrReg == 5)
5647 && (pCtx->cr4 & X86_CR4_DE) )
5648 {
5649 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5650 return iemRaiseGeneralProtectionFault0(pVCpu);
5651 }
5652
5653 /* Raise #DB if general access detect is enabled. */
5654 if (pCtx->dr[7] & X86_DR7_GD)
5655 {
5656 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5657 return iemRaiseDebugException(pVCpu);
5658 }
5659
5660 /*
5661 * Read the debug register and store it in the specified general register.
5662 */
5663 uint64_t drX;
5664 switch (iDrReg)
5665 {
5666 case 0: drX = pCtx->dr[0]; break;
5667 case 1: drX = pCtx->dr[1]; break;
5668 case 2: drX = pCtx->dr[2]; break;
5669 case 3: drX = pCtx->dr[3]; break;
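        /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear; the CR4.DE=1 case
           was already rejected with #GP(0) above. */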
5670 case 6:
5671 case 4:
5672 drX = pCtx->dr[6];
5673 drX |= X86_DR6_RA1_MASK;
5674 drX &= ~X86_DR6_RAZ_MASK;
5675 break;
5676 case 7:
5677 case 5:
5678 drX = pCtx->dr[7];
5679            drX |= X86_DR7_RA1_MASK;
5680 drX &= ~X86_DR7_RAZ_MASK;
5681 break;
5682 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5683 }
5684
5685 /** @todo SVM nested-guest intercept for DR8-DR15? */
5686 /*
5687 * Check for any SVM nested-guest intercepts for the DRx read.
5688 */
5689 if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
5690 {
5691 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
5692 IEM_SVM_UPDATE_NRIP(pVCpu);
5693 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
5694 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5695 }
5696
5697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5698 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
5699 else
5700 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
5701
5702 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5703 return VINF_SUCCESS;
5704}
5705
5706
5707/**
5708 * Implements mov DRx,GReg.
5709 *
5710 * @param iDrReg The DRx register to write (valid).
5711 * @param iGReg The general register to load the DRx value from.
5712 */
5713IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5714{
5715 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5716
5717 /*
5718 * Check preconditions.
5719 */
5720 if (pVCpu->iem.s.uCpl != 0)
5721 return iemRaiseGeneralProtectionFault0(pVCpu);
5722 Assert(!pCtx->eflags.Bits.u1VM);
5723
5724 if (iDrReg == 4 || iDrReg == 5)
5725 {
5726 if (pCtx->cr4 & X86_CR4_DE)
5727 {
5728 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5729 return iemRaiseGeneralProtectionFault0(pVCpu);
5730 }
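        /* Alias DR4/DR5 to DR6/DR7 (CR4.DE is clear if we got here). */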
5731 iDrReg += 2;
5732 }
5733
5734 /* Raise #DB if general access detect is enabled. */
5735    /** @todo is the \#DB for DR7.GD raised before or after the \#GP for reserved
5736     *        high bits in DR6/DR7? */
5737 if (pCtx->dr[7] & X86_DR7_GD)
5738 {
5739 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5740 return iemRaiseDebugException(pVCpu);
5741 }
5742
5743 /*
5744 * Read the new value from the source register.
5745 */
5746 uint64_t uNewDrX;
5747 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5748 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
5749 else
5750 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
5751
5752 /*
5753 * Adjust it.
5754 */
5755 switch (iDrReg)
5756 {
5757 case 0:
5758 case 1:
5759 case 2:
5760 case 3:
5761 /* nothing to adjust */
5762 break;
5763
5764 case 6:
5765 if (uNewDrX & X86_DR6_MBZ_MASK)
5766 {
5767 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5768 return iemRaiseGeneralProtectionFault0(pVCpu);
5769 }
5770 uNewDrX |= X86_DR6_RA1_MASK;
5771 uNewDrX &= ~X86_DR6_RAZ_MASK;
5772 break;
5773
5774 case 7:
5775 if (uNewDrX & X86_DR7_MBZ_MASK)
5776 {
5777 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5778 return iemRaiseGeneralProtectionFault0(pVCpu);
5779 }
5780 uNewDrX |= X86_DR7_RA1_MASK;
5781 uNewDrX &= ~X86_DR7_RAZ_MASK;
5782 break;
5783
5784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5785 }
5786
5787 /** @todo SVM nested-guest intercept for DR8-DR15? */
5788 /*
5789 * Check for any SVM nested-guest intercepts for the DRx write.
5790 */
5791 if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
5792 {
5793 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
5794 IEM_SVM_UPDATE_NRIP(pVCpu);
5795 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
5796 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5797 }
5798
5799 /*
5800 * Do the actual setting.
5801 */
5802 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5803 {
5804 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
5805 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5806 }
5807 else
5808 pCtx->dr[iDrReg] = uNewDrX;
5809
5810 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5811 return VINF_SUCCESS;
5812}
5813
5814
5815/**
5816 * Implements 'INVLPG m'.
5817 *
5818 * @param GCPtrPage The effective address of the page to invalidate.
5819 * @remarks Updates the RIP.
5820 */
5821IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5822{
5823 /* ring-0 only. */
5824 if (pVCpu->iem.s.uCpl != 0)
5825 return iemRaiseGeneralProtectionFault0(pVCpu);
5826 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5827
5828 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
5829 {
5830 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
5831 IEM_SVM_UPDATE_NRIP(pVCpu);
5832 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
5833 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
5834 }
5835
5836 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
5837 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5838
5839 if (rc == VINF_SUCCESS)
5840 return VINF_SUCCESS;
5841 if (rc == VINF_PGM_SYNC_CR3)
5842 return iemSetPassUpStatus(pVCpu, rc);
5843
5844 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5845 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5846 return rc;
5847}
5848
5849
5850/**
5851 * Implements INVPCID.
5852 *
5853 * @param uInvpcidType The invalidation type.
5854 * @param   GCPtrInvpcidDesc    The effective address of the INVPCID descriptor.
5855 * @remarks Updates the RIP.
5856 */
5857IEM_CIMPL_DEF_2(iemCImpl_invpcid, uint64_t, uInvpcidType, RTGCPTR, GCPtrInvpcidDesc)
5858{
5859 /*
5860 * Check preconditions.
5861 */
5862 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
5863 return iemRaiseUndefinedOpcode(pVCpu);
5864 if (pVCpu->iem.s.uCpl != 0)
5865 {
5866 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
5867 return iemRaiseGeneralProtectionFault0(pVCpu);
5868 }
5869 if (IEM_IS_V86_MODE(pVCpu))
5870 {
5871 Log(("invpcid: v8086 mode -> #GP(0)\n"));
5872 return iemRaiseGeneralProtectionFault0(pVCpu);
5873 }
5874 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
5875 {
5876 Log(("invpcid: invalid/unrecognized invpcid type %#x -> #GP(0)\n", uInvpcidType));
5877 return iemRaiseGeneralProtectionFault0(pVCpu);
5878 }
5879
5880 /*
5881 * Fetch the invpcid descriptor from guest memory.
5882 */
5883 RTUINT128U uDesc;
5884 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, pVCpu->iem.s.iEffSeg, GCPtrInvpcidDesc);
5885 if (rcStrict == VINF_SUCCESS)
5886 {
5887 /*
5888 * Validate the descriptor.
5889 */
5890 if (uDesc.s.Lo > 0xfff)
5891 {
5892 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
5893 return iemRaiseGeneralProtectionFault0(pVCpu);
5894 }
5895
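        /* The 128-bit INVPCID descriptor layout: bits 11:0 hold the PCID, bits 63:12
           are reserved (must be zero, checked above), and bits 127:64 hold the linear
           address used by the individual-address invalidation type. */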
5896 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
5897        uint16_t const      uPcid       = uDesc.s.Lo & UINT64_C(0xfff); /* Note: PCID is 12 bits wide; uint8_t would truncate it. */
5898 uint32_t const uCr4 = IEM_GET_CTX(pVCpu)->cr4;
5899 uint64_t const uCr3 = IEM_GET_CTX(pVCpu)->cr3;
5900 switch (uInvpcidType)
5901 {
5902 case X86_INVPCID_TYPE_INDV_ADDR:
5903 {
5904 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
5905 {
5906 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
5907 return iemRaiseGeneralProtectionFault0(pVCpu);
5908 }
5909 if ( !(uCr4 & X86_CR4_PCIDE)
5910 && uPcid != 0)
5911 {
5912 Log(("invpcid: invalid pcid %#x\n", uPcid));
5913 return iemRaiseGeneralProtectionFault0(pVCpu);
5914 }
5915
5916 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
5917 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
5918 break;
5919 }
5920
5921 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
5922 {
5923 if ( !(uCr4 & X86_CR4_PCIDE)
5924 && uPcid != 0)
5925 {
5926 Log(("invpcid: invalid pcid %#x\n", uPcid));
5927 return iemRaiseGeneralProtectionFault0(pVCpu);
5928 }
5929 /* Invalidate all mappings associated with PCID except global translations. */
5930 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
5931 break;
5932 }
5933
5934 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
5935 {
5936 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
5937 break;
5938 }
5939
5940 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
5941 {
5942 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
5943 break;
5944 }
5945 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5946 }
5947 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5948 }
5949 return rcStrict;
5950}
5951
5952
5953/**
5954 * Implements RDTSC.
5955 */
5956IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5957{
5958 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5959
5960 /*
5961 * Check preconditions.
5962 */
5963 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
5964 return iemRaiseUndefinedOpcode(pVCpu);
5965
5966 if ( (pCtx->cr4 & X86_CR4_TSD)
5967 && pVCpu->iem.s.uCpl != 0)
5968 {
5969 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5970 return iemRaiseGeneralProtectionFault0(pVCpu);
5971 }
5972
5973 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
5974 {
5975 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
5976 IEM_SVM_UPDATE_NRIP(pVCpu);
5977 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5978 }
5979
5980 /*
5981 * Do the job.
5982 */
5983 uint64_t uTicks = TMCpuTickGet(pVCpu);
5984#ifdef VBOX_WITH_NESTED_HWVIRT
5985 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
5986#endif
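    /* RDTSC returns the value in EDX:EAX; assigning the 32-bit halves below also
       zeroes the upper 32 bits of RAX and RDX, matching hardware in 64-bit mode. */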
5987 pCtx->rax = RT_LO_U32(uTicks);
5988 pCtx->rdx = RT_HI_U32(uTicks);
5989#ifdef IEM_VERIFICATION_MODE_FULL
5990 pVCpu->iem.s.fIgnoreRaxRdx = true;
5991#endif
5992
5993 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5994 return VINF_SUCCESS;
5995}
5996
5997
5998/**
5999 * Implements RDTSCP.
6000 */
6001IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
6002{
6003 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6004
6005 /*
6006 * Check preconditions.
6007 */
6008 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
6009 return iemRaiseUndefinedOpcode(pVCpu);
6010
6011 if ( (pCtx->cr4 & X86_CR4_TSD)
6012 && pVCpu->iem.s.uCpl != 0)
6013 {
6014 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6015 return iemRaiseGeneralProtectionFault0(pVCpu);
6016 }
6017
6018 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
6019 {
6020 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
6021 IEM_SVM_UPDATE_NRIP(pVCpu);
6022 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6023 }
6024
6025 /*
6026 * Do the job.
6027 * Query the MSR first in case of trips to ring-3.
6028 */
6029 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
6030 if (rcStrict == VINF_SUCCESS)
6031 {
6032 /* Low dword of the TSC_AUX msr only. */
6033 pCtx->rcx &= UINT32_C(0xffffffff);
6034
6035 uint64_t uTicks = TMCpuTickGet(pVCpu);
6036#ifdef VBOX_WITH_NESTED_HWVIRT
6037 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6038#endif
6039 pCtx->rax = RT_LO_U32(uTicks);
6040 pCtx->rdx = RT_HI_U32(uTicks);
6041#ifdef IEM_VERIFICATION_MODE_FULL
6042 pVCpu->iem.s.fIgnoreRaxRdx = true;
6043#endif
6044 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6045 }
6046 return rcStrict;
6047}
6048
6049
6050/**
6051 * Implements RDPMC.
6052 */
6053IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
6054{
6055 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6056 if ( pVCpu->iem.s.uCpl != 0
6057 && !(pCtx->cr4 & X86_CR4_PCE))
6058 return iemRaiseGeneralProtectionFault0(pVCpu);
6059
6060 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
6061 {
6062 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
6063 IEM_SVM_UPDATE_NRIP(pVCpu);
6064 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6065 }
6066
6067 /** @todo Implement RDPMC for the regular guest execution case (the above only
6068 * handles nested-guest intercepts). */
6069 RT_NOREF(cbInstr);
6070 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
6071}
6072
6073
6074/**
6075 * Implements RDMSR.
6076 */
6077IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
6078{
6079 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6080
6081 /*
6082 * Check preconditions.
6083 */
6084 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
6085 return iemRaiseUndefinedOpcode(pVCpu);
6086 if (pVCpu->iem.s.uCpl != 0)
6087 return iemRaiseGeneralProtectionFault0(pVCpu);
6088
6089 /*
6090 * Do the job.
6091 */
6092 RTUINT64U uValue;
6093 VBOXSTRICTRC rcStrict;
6094#ifdef VBOX_WITH_NESTED_HWVIRT
6095 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
6096 {
6097 rcStrict = iemSvmHandleMsrIntercept(pVCpu, pCtx, pCtx->ecx, false /* fWrite */);
6098 if (rcStrict == VINF_SVM_VMEXIT)
6099 return VINF_SUCCESS;
6100 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6101 {
6102 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
6103 return rcStrict;
6104 }
6105 }
6106#endif
6107
6108 rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
6109 if (rcStrict == VINF_SUCCESS)
6110 {
6111 pCtx->rax = uValue.s.Lo;
6112 pCtx->rdx = uValue.s.Hi;
6113
6114 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6115 return VINF_SUCCESS;
6116 }
6117
6118#ifndef IN_RING3
6119 /* Deferred to ring-3. */
6120 if (rcStrict == VINF_CPUM_R3_MSR_READ)
6121 {
6122 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
6123 return rcStrict;
6124 }
6125#else /* IN_RING3 */
6126    /* Often an unimplemented MSR or MSR bit, so worth logging. */
6127 static uint32_t s_cTimes = 0;
6128 if (s_cTimes++ < 10)
6129 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
6130 else
6131#endif
6132 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
6133 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6134 return iemRaiseGeneralProtectionFault0(pVCpu);
6135}
6136
6137
6138/**
6139 * Implements WRMSR.
6140 */
6141IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
6142{
6143 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6144
6145 /*
6146 * Check preconditions.
6147 */
6148 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
6149 return iemRaiseUndefinedOpcode(pVCpu);
6150 if (pVCpu->iem.s.uCpl != 0)
6151 return iemRaiseGeneralProtectionFault0(pVCpu);
6152
6153 /*
6154 * Do the job.
6155 */
6156 RTUINT64U uValue;
6157 uValue.s.Lo = pCtx->eax;
6158 uValue.s.Hi = pCtx->edx;
6159
6160 VBOXSTRICTRC rcStrict;
6161#ifdef VBOX_WITH_NESTED_HWVIRT
6162 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
6163 {
6164 rcStrict = iemSvmHandleMsrIntercept(pVCpu, pCtx, pCtx->ecx, true /* fWrite */);
6165 if (rcStrict == VINF_SVM_VMEXIT)
6166 return VINF_SUCCESS;
6167 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6168 {
6169            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
6170 return rcStrict;
6171 }
6172 }
6173#endif
6174
6175 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6176 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
6177 else
6178 {
6179#ifdef IN_RING3
6180 CPUMCTX CtxTmp = *pCtx;
6181 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
6182 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6183 *pCtx = *pCtx2;
6184 *pCtx2 = CtxTmp;
6185#else
6186 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
6187#endif
6188 }
6189 if (rcStrict == VINF_SUCCESS)
6190 {
6191 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6192 return VINF_SUCCESS;
6193 }
6194
6195#ifndef IN_RING3
6196 /* Deferred to ring-3. */
6197 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
6198 {
6199 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
6200 return rcStrict;
6201 }
6202#else /* IN_RING3 */
6203    /* Often an unimplemented MSR or MSR bit, so worth logging. */
6204 static uint32_t s_cTimes = 0;
6205 if (s_cTimes++ < 10)
6206 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
6207 else
6208#endif
6209 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
6210 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6211 return iemRaiseGeneralProtectionFault0(pVCpu);
6212}
6213
6214
6215/**
6216 * Implements 'IN eAX, port'.
6217 *
6218 * @param u16Port The source port.
6219 * @param cbReg The register size.
6220 */
6221IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
6222{
6223 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6224
6225 /*
6226 * CPL check
6227 */
6228 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6229 if (rcStrict != VINF_SUCCESS)
6230 return rcStrict;
6231
6232 /*
6233 * Check SVM nested-guest IO intercept.
6234 */
6235#ifdef VBOX_WITH_NESTED_HWVIRT
6236 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6237 {
6238 uint8_t cAddrSizeBits;
6239 switch (pVCpu->iem.s.enmEffAddrMode)
6240 {
6241 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
6242 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
6243 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
6244 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6245 }
6246 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
6247 false /* fRep */, false /* fStrIo */, cbInstr);
6248 if (rcStrict == VINF_SVM_VMEXIT)
6249 return VINF_SUCCESS;
6250 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6251 {
6252 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6253 VBOXSTRICTRC_VAL(rcStrict)));
6254 return rcStrict;
6255 }
6256 }
6257#endif
6258
6259 /*
6260 * Perform the I/O.
6261 */
6262 uint32_t u32Value;
6263 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6264 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg);
6265 else
6266 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, cbReg);
6267 if (IOM_SUCCESS(rcStrict))
6268 {
6269 switch (cbReg)
6270 {
6271 case 1: pCtx->al = (uint8_t)u32Value; break;
6272 case 2: pCtx->ax = (uint16_t)u32Value; break;
6273 case 4: pCtx->rax = u32Value; break;
6274 default: AssertFailedReturn(VERR_IEM_IPE_3);
6275 }
6276 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6277 pVCpu->iem.s.cPotentialExits++;
6278 if (rcStrict != VINF_SUCCESS)
6279 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6280 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6281
6282 /*
6283 * Check for I/O breakpoints.
6284 */
6285 uint32_t const uDr7 = pCtx->dr[7];
6286 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6287 && X86_DR7_ANY_RW_IO(uDr7)
6288 && (pCtx->cr4 & X86_CR4_DE))
6289 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6290 {
6291 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6292 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6293 rcStrict = iemRaiseDebugException(pVCpu);
6294 }
6295 }
6296
6297 return rcStrict;
6298}
6299
6300
6301/**
6302 * Implements 'IN eAX, DX'.
6303 *
6304 * @param cbReg The register size.
6305 */
6306IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
6307{
6308 return IEM_CIMPL_CALL_2(iemCImpl_in, IEM_GET_CTX(pVCpu)->dx, cbReg);
6309}
6310
6311
6312/**
6313 * Implements 'OUT port, eAX'.
6314 *
6315 * @param u16Port The destination port.
6316 * @param cbReg The register size.
6317 */
6318IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
6319{
6320 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6321
6322 /*
6323 * CPL check
6324 */
6325 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6326 if (rcStrict != VINF_SUCCESS)
6327 return rcStrict;
6328
6329 /*
6330 * Check SVM nested-guest IO intercept.
6331 */
6332#ifdef VBOX_WITH_NESTED_HWVIRT
6333 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6334 {
6335 uint8_t cAddrSizeBits;
6336 switch (pVCpu->iem.s.enmEffAddrMode)
6337 {
6338 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
6339 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
6340 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
6341 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6342 }
6343 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
6344 false /* fRep */, false /* fStrIo */, cbInstr);
6345 if (rcStrict == VINF_SVM_VMEXIT)
6346 return VINF_SUCCESS;
6347 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6348 {
6349 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6350 VBOXSTRICTRC_VAL(rcStrict)));
6351 return rcStrict;
6352 }
6353 }
6354#endif
6355
6356 /*
6357 * Perform the I/O.
6358 */
6359 uint32_t u32Value;
6360 switch (cbReg)
6361 {
6362 case 1: u32Value = pCtx->al; break;
6363 case 2: u32Value = pCtx->ax; break;
6364 case 4: u32Value = pCtx->eax; break;
6365 default: AssertFailedReturn(VERR_IEM_IPE_4);
6366 }
6367 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6368 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg);
6369 else
6370 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, u32Value, cbReg);
6371 if (IOM_SUCCESS(rcStrict))
6372 {
6373 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6374 pVCpu->iem.s.cPotentialExits++;
6375 if (rcStrict != VINF_SUCCESS)
6376 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6377 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6378
6379 /*
6380 * Check for I/O breakpoints.
6381 */
6382 uint32_t const uDr7 = pCtx->dr[7];
6383 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6384 && X86_DR7_ANY_RW_IO(uDr7)
6385 && (pCtx->cr4 & X86_CR4_DE))
6386 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6387 {
6388 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6389 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6390 rcStrict = iemRaiseDebugException(pVCpu);
6391 }
6392 }
6393 return rcStrict;
6394}
6395
6396
6397/**
6398 * Implements 'OUT DX, eAX'.
6399 *
6400 * @param cbReg The register size.
6401 */
6402IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
6403{
6404 return IEM_CIMPL_CALL_2(iemCImpl_out, IEM_GET_CTX(pVCpu)->dx, cbReg);
6405}
6406
6407
6408/**
6409 * Implements 'CLI'.
6410 */
6411IEM_CIMPL_DEF_0(iemCImpl_cli)
6412{
6413 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6414 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6415 uint32_t const fEflOld = fEfl;
6416 if (pCtx->cr0 & X86_CR0_PE)
6417 {
6418 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6419 if (!(fEfl & X86_EFL_VM))
6420 {
6421 if (pVCpu->iem.s.uCpl <= uIopl)
6422 fEfl &= ~X86_EFL_IF;
6423 else if ( pVCpu->iem.s.uCpl == 3
6424 && (pCtx->cr4 & X86_CR4_PVI) )
6425 fEfl &= ~X86_EFL_VIF;
6426 else
6427 return iemRaiseGeneralProtectionFault0(pVCpu);
6428 }
6429 /* V8086 */
6430 else if (uIopl == 3)
6431 fEfl &= ~X86_EFL_IF;
6432 else if ( uIopl < 3
6433 && (pCtx->cr4 & X86_CR4_VME) )
6434 fEfl &= ~X86_EFL_VIF;
6435 else
6436 return iemRaiseGeneralProtectionFault0(pVCpu);
6437 }
6438 /* real mode */
6439 else
6440 fEfl &= ~X86_EFL_IF;
6441
6442 /* Commit. */
6443 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6444 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6445 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
6446 return VINF_SUCCESS;
6447}
6448
6449
6450/**
6451 * Implements 'STI'.
6452 */
6453IEM_CIMPL_DEF_0(iemCImpl_sti)
6454{
6455 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6456 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6457 uint32_t const fEflOld = fEfl;
6458
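    /* Note: if this STI actually raises IF (0 -> 1), the instruction following STI
       executes before any maskable interrupt is delivered; this is modelled by the
       EMSetInhibitInterruptsPC call at the end of this function. */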
6459 if (pCtx->cr0 & X86_CR0_PE)
6460 {
6461 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6462 if (!(fEfl & X86_EFL_VM))
6463 {
6464 if (pVCpu->iem.s.uCpl <= uIopl)
6465 fEfl |= X86_EFL_IF;
6466 else if ( pVCpu->iem.s.uCpl == 3
6467 && (pCtx->cr4 & X86_CR4_PVI)
6468 && !(fEfl & X86_EFL_VIP) )
6469 fEfl |= X86_EFL_VIF;
6470 else
6471 return iemRaiseGeneralProtectionFault0(pVCpu);
6472 }
6473 /* V8086 */
6474 else if (uIopl == 3)
6475 fEfl |= X86_EFL_IF;
6476 else if ( uIopl < 3
6477 && (pCtx->cr4 & X86_CR4_VME)
6478 && !(fEfl & X86_EFL_VIP) )
6479 fEfl |= X86_EFL_VIF;
6480 else
6481 return iemRaiseGeneralProtectionFault0(pVCpu);
6482 }
6483 /* real mode */
6484 else
6485 fEfl |= X86_EFL_IF;
6486
6487 /* Commit. */
6488 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6489 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6490 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
6491 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6492 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
6493 return VINF_SUCCESS;
6494}
6495
6496
6497/**
6498 * Implements 'HLT'.
6499 */
6500IEM_CIMPL_DEF_0(iemCImpl_hlt)
6501{
6502 if (pVCpu->iem.s.uCpl != 0)
6503 return iemRaiseGeneralProtectionFault0(pVCpu);
6504
6505 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
6506 {
6507 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
6508 IEM_SVM_UPDATE_NRIP(pVCpu);
6509 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6510 }
6511
6512 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6513 return VINF_EM_HALT;
6514}
6515
6516
6517/**
6518 * Implements 'MONITOR'.
6519 */
6520IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
6521{
6522 /*
6523 * Permission checks.
6524 */
6525 if (pVCpu->iem.s.uCpl != 0)
6526 {
6527 Log2(("monitor: CPL != 0\n"));
6528 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
6529 }
6530 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6531 {
6532 Log2(("monitor: Not in CPUID\n"));
6533 return iemRaiseUndefinedOpcode(pVCpu);
6534 }
6535
6536 /*
6537 * Gather the operands and validate them.
6538 */
6539 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6540 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6541 uint32_t uEcx = pCtx->ecx;
6542 uint32_t uEdx = pCtx->edx;
6543/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
6544 * \#GP first. */
6545 if (uEcx != 0)
6546 {
6547 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
6548 return iemRaiseGeneralProtectionFault0(pVCpu);
6549 }
6550
6551 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
6552 if (rcStrict != VINF_SUCCESS)
6553 return rcStrict;
6554
6555 RTGCPHYS GCPhysMem;
6556 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
6557 if (rcStrict != VINF_SUCCESS)
6558 return rcStrict;
6559
6560 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
6561 {
6562 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
6563 IEM_SVM_UPDATE_NRIP(pVCpu);
6564 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6565 }
6566
6567 /*
6568 * Call EM to prepare the monitor/wait.
6569 */
6570 rcStrict = EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
6571 Assert(rcStrict == VINF_SUCCESS);
6572
6573 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6574 return rcStrict;
6575}
6576
6577
6578/**
6579 * Implements 'MWAIT'.
6580 */
6581IEM_CIMPL_DEF_0(iemCImpl_mwait)
6582{
6583 /*
6584 * Permission checks.
6585 */
6586 if (pVCpu->iem.s.uCpl != 0)
6587 {
6588 Log2(("mwait: CPL != 0\n"));
6589 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
6590 * EFLAGS.VM then.) */
6591 return iemRaiseUndefinedOpcode(pVCpu);
6592 }
6593 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6594 {
6595 Log2(("mwait: Not in CPUID\n"));
6596 return iemRaiseUndefinedOpcode(pVCpu);
6597 }
6598
6599 /*
6600 * Gather the operands and validate them.
6601 */
6602 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6603 uint32_t uEax = pCtx->eax;
6604 uint32_t uEcx = pCtx->ecx;
6605 if (uEcx != 0)
6606 {
6607 /* Only supported extension is break on IRQ when IF=0. */
6608 if (uEcx > 1)
6609 {
6610 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
6611 return iemRaiseGeneralProtectionFault0(pVCpu);
6612 }
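        /* ECX[0] requests the 'break on interrupt even when IF=0' extension; it is
           only valid when CPUID.05H:ECX reports both the extension enumeration bit
           and the interrupt-break capability, which is what the check below does. */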
6613 uint32_t fMWaitFeatures = 0;
6614 uint32_t uIgnore = 0;
6615 CPUMGetGuestCpuId(pVCpu, 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
6616 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6617 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6618 {
6619 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
6620 return iemRaiseGeneralProtectionFault0(pVCpu);
6621 }
6622 }
6623
6624 /*
6625 * Check SVM nested-guest mwait intercepts.
6626 */
6627 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
6628 && EMMonitorIsArmed(pVCpu))
6629 {
6630 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
6631 IEM_SVM_UPDATE_NRIP(pVCpu);
6632 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6633 }
6634 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
6635 {
6636 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
6637 IEM_SVM_UPDATE_NRIP(pVCpu);
6638 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6639 }
6640
6641 /*
6642 * Call EM to prepare the monitor/wait.
6643 */
6644 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
6645
6646 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6647 return rcStrict;
6648}
6649
6650
6651/**
6652 * Implements 'SWAPGS'.
6653 */
6654IEM_CIMPL_DEF_0(iemCImpl_swapgs)
6655{
6656 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
6657
6658 /*
6659 * Permission checks.
6660 */
6661 if (pVCpu->iem.s.uCpl != 0)
6662 {
6663 Log2(("swapgs: CPL != 0\n"));
6664 return iemRaiseUndefinedOpcode(pVCpu);
6665 }
6666
6667 /*
6668 * Do the job.
6669 */
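    /* SWAPGS simply exchanges GS.base with the IA32_KERNEL_GS_BASE MSR value; no
       other hidden GS attributes are touched. */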
6670 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6671 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
6672 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
6673 pCtx->gs.u64Base = uOtherGsBase;
6674
6675 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6676 return VINF_SUCCESS;
6677}
6678
6679
6680/**
6681 * Implements 'CPUID'.
6682 */
6683IEM_CIMPL_DEF_0(iemCImpl_cpuid)
6684{
6685 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6686
6687 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
6688 {
6689 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
6690 IEM_SVM_UPDATE_NRIP(pVCpu);
6691 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6692 }
6693
6694 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
6695 pCtx->rax &= UINT32_C(0xffffffff);
6696 pCtx->rbx &= UINT32_C(0xffffffff);
6697 pCtx->rcx &= UINT32_C(0xffffffff);
6698 pCtx->rdx &= UINT32_C(0xffffffff);
6699
6700 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6701 return VINF_SUCCESS;
6702}
6703
6704
6705/**
6706 * Implements 'AAD'.
6707 *
6708 * @param bImm The immediate operand.
6709 */
6710IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
6711{
6712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6713
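    /* AAD computes AL = AL + AH * imm8 and clears AH; SF, ZF and PF are updated
       from the new AL while OF, AF and CF are architecturally undefined. */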
6714 uint16_t const ax = pCtx->ax;
6715 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
6716 pCtx->ax = al;
6717 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6718 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6719 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6720
6721 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6722 return VINF_SUCCESS;
6723}
6724
6725
6726/**
6727 * Implements 'AAM'.
6728 *
6729 * @param bImm The immediate operand. Cannot be 0.
6730 */
6731IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
6732{
6733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6734 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
6735
6736 uint16_t const ax = pCtx->ax;
6737 uint8_t const al = (uint8_t)ax % bImm;
6738 uint8_t const ah = (uint8_t)ax / bImm;
6739 pCtx->ax = (ah << 8) + al;
6740 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6741 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6742 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6743
6744 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6745 return VINF_SUCCESS;
6746}
6747
6748
6749/**
6750 * Implements 'DAA'.
6751 */
6752IEM_CIMPL_DEF_0(iemCImpl_daa)
6753{
6754 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6755
6756 uint8_t const al = pCtx->al;
6757 bool const fCarry = pCtx->eflags.Bits.u1CF;
6758
6759 if ( pCtx->eflags.Bits.u1AF
6760 || (al & 0xf) >= 10)
6761 {
6762 pCtx->al = al + 6;
6763 pCtx->eflags.Bits.u1AF = 1;
6764 }
6765 else
6766 pCtx->eflags.Bits.u1AF = 0;
6767
6768 if (al >= 0x9a || fCarry)
6769 {
6770 pCtx->al += 0x60;
6771 pCtx->eflags.Bits.u1CF = 1;
6772 }
6773 else
6774 pCtx->eflags.Bits.u1CF = 0;
6775
6776 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6777 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6778 return VINF_SUCCESS;
6779}
6780
6781
6782/**
6783 * Implements 'DAS'.
6784 */
6785IEM_CIMPL_DEF_0(iemCImpl_das)
6786{
6787 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6788
6789 uint8_t const uInputAL = pCtx->al;
6790 bool const fCarry = pCtx->eflags.Bits.u1CF;
6791
6792 if ( pCtx->eflags.Bits.u1AF
6793 || (uInputAL & 0xf) >= 10)
6794 {
6795 pCtx->eflags.Bits.u1AF = 1;
6796 if (uInputAL < 6)
6797 pCtx->eflags.Bits.u1CF = 1;
6798 pCtx->al = uInputAL - 6;
6799 }
6800 else
6801 {
6802 pCtx->eflags.Bits.u1AF = 0;
6803 pCtx->eflags.Bits.u1CF = 0;
6804 }
6805
6806 if (uInputAL >= 0x9a || fCarry)
6807 {
6808 pCtx->al -= 0x60;
6809 pCtx->eflags.Bits.u1CF = 1;
6810 }
6811
6812 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6813 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6814 return VINF_SUCCESS;
6815}
6816
6817
6818/**
6819 * Implements 'AAA'.
6820 */
6821IEM_CIMPL_DEF_0(iemCImpl_aaa)
6822{
6823 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6824
6825 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6826 {
6827 if ( pCtx->eflags.Bits.u1AF
6828 || (pCtx->ax & 0xf) >= 10)
6829 {
6830 iemAImpl_add_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6831 pCtx->eflags.Bits.u1AF = 1;
6832 pCtx->eflags.Bits.u1CF = 1;
6833#ifdef IEM_VERIFICATION_MODE_FULL
6834            pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6835#endif
6836 }
6837 else
6838 {
6839 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6840 pCtx->eflags.Bits.u1AF = 0;
6841 pCtx->eflags.Bits.u1CF = 0;
6842 }
6843 pCtx->ax &= UINT16_C(0xff0f);
6844 }
6845 else
6846 {
6847 if ( pCtx->eflags.Bits.u1AF
6848 || (pCtx->ax & 0xf) >= 10)
6849 {
6850 pCtx->ax += UINT16_C(0x106);
6851 pCtx->eflags.Bits.u1AF = 1;
6852 pCtx->eflags.Bits.u1CF = 1;
6853 }
6854 else
6855 {
6856 pCtx->eflags.Bits.u1AF = 0;
6857 pCtx->eflags.Bits.u1CF = 0;
6858 }
6859 pCtx->ax &= UINT16_C(0xff0f);
6860 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6861 }
6862
6863 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6864 return VINF_SUCCESS;
6865}
6866
6867
6868/**
6869 * Implements 'AAS'.
6870 */
6871IEM_CIMPL_DEF_0(iemCImpl_aas)
6872{
6873 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6874
6875 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6876 {
6877 if ( pCtx->eflags.Bits.u1AF
6878 || (pCtx->ax & 0xf) >= 10)
6879 {
6880 iemAImpl_sub_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6881 pCtx->eflags.Bits.u1AF = 1;
6882 pCtx->eflags.Bits.u1CF = 1;
6883#ifdef IEM_VERIFICATION_MODE_FULL
6884            pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6885#endif
6886 }
6887 else
6888 {
6889 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6890 pCtx->eflags.Bits.u1AF = 0;
6891 pCtx->eflags.Bits.u1CF = 0;
6892 }
6893 pCtx->ax &= UINT16_C(0xff0f);
6894 }
6895 else
6896 {
6897 if ( pCtx->eflags.Bits.u1AF
6898 || (pCtx->ax & 0xf) >= 10)
6899 {
6900 pCtx->ax -= UINT16_C(0x106);
6901 pCtx->eflags.Bits.u1AF = 1;
6902 pCtx->eflags.Bits.u1CF = 1;
6903 }
6904 else
6905 {
6906 pCtx->eflags.Bits.u1AF = 0;
6907 pCtx->eflags.Bits.u1CF = 0;
6908 }
6909 pCtx->ax &= UINT16_C(0xff0f);
6910 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6911 }
6912
6913 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6914 return VINF_SUCCESS;
6915}
6916
6917
6918/**
6919 * Implements the 16-bit version of 'BOUND'.
6920 *
6921 * @note We have separate 16-bit and 32-bit variants of this function due to
6922 * the decoder using unsigned parameters, whereas we want signed one to
6923 *       the decoder using unsigned parameters, whereas we want signed ones to
6924 */
6925IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
6926{
6927 /*
6928 * Check if the index is inside the bounds, otherwise raise #BR.
6929 */
6930 if ( idxArray >= idxLowerBound
6931 && idxArray <= idxUpperBound)
6932 {
6933 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6934 return VINF_SUCCESS;
6935 }
6936
6937 return iemRaiseBoundRangeExceeded(pVCpu);
6938}
6939
6940
6941/**
6942 * Implements the 32-bit version of 'BOUND'.
6943 */
6944IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
6945{
6946 /*
6947 * Check if the index is inside the bounds, otherwise raise #BR.
6948 */
6949 if ( idxArray >= idxLowerBound
6950 && idxArray <= idxUpperBound)
6951 {
6952 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6953 return VINF_SUCCESS;
6954 }
6955
6956 return iemRaiseBoundRangeExceeded(pVCpu);
6957}
6958
6959
6960
6961/*
6962 * Instantiate the various string operation combinations.
6963 */
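/* Each include of IEMAllCImplStrInstr.cpp.h below expands the string instruction
   workers for one OP_SIZE/ADDR_SIZE combination; the 64-bit operand size is only
   paired with 32-bit and 64-bit addressing since 16-bit addressing does not exist
   in 64-bit mode. */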
6964#define OP_SIZE 8
6965#define ADDR_SIZE 16
6966#include "IEMAllCImplStrInstr.cpp.h"
6967#define OP_SIZE 8
6968#define ADDR_SIZE 32
6969#include "IEMAllCImplStrInstr.cpp.h"
6970#define OP_SIZE 8
6971#define ADDR_SIZE 64
6972#include "IEMAllCImplStrInstr.cpp.h"
6973
6974#define OP_SIZE 16
6975#define ADDR_SIZE 16
6976#include "IEMAllCImplStrInstr.cpp.h"
6977#define OP_SIZE 16
6978#define ADDR_SIZE 32
6979#include "IEMAllCImplStrInstr.cpp.h"
6980#define OP_SIZE 16
6981#define ADDR_SIZE 64
6982#include "IEMAllCImplStrInstr.cpp.h"
6983
6984#define OP_SIZE 32
6985#define ADDR_SIZE 16
6986#include "IEMAllCImplStrInstr.cpp.h"
6987#define OP_SIZE 32
6988#define ADDR_SIZE 32
6989#include "IEMAllCImplStrInstr.cpp.h"
6990#define OP_SIZE 32
6991#define ADDR_SIZE 64
6992#include "IEMAllCImplStrInstr.cpp.h"
6993
6994#define OP_SIZE 64
6995#define ADDR_SIZE 32
6996#include "IEMAllCImplStrInstr.cpp.h"
6997#define OP_SIZE 64
6998#define ADDR_SIZE 64
6999#include "IEMAllCImplStrInstr.cpp.h"
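
/*
 * A minimal sketch of the multiple-inclusion template pattern used above.
 * The real IEMAllCImplStrInstr.cpp.h is far more involved and its internal
 * macro names are not shown here; everything below is made up purely to
 * illustrate how OP_SIZE/ADDR_SIZE parameterize one inclusion.
 */
#if 0 /* illustration only */
/* strtmpl.h: */
# define STR_CAT_(a_Op, a_Addr)  example_movs_op ## a_Op ## _addr ## a_Addr
# define STR_CAT(a_Op, a_Addr)   STR_CAT_(a_Op, a_Addr)   /* extra level so the sizes expand first */
static void STR_CAT(OP_SIZE, ADDR_SIZE)(void)
{
    /* ...body operating on OP_SIZE-bit elements with ADDR_SIZE-bit addressing... */
}
# undef STR_CAT
# undef STR_CAT_
# undef OP_SIZE
# undef ADDR_SIZE

/* user: */
# define OP_SIZE   8
# define ADDR_SIZE 16
# include "strtmpl.h"          /* emits example_movs_op8_addr16() */
# define OP_SIZE   16
# define ADDR_SIZE 32
# include "strtmpl.h"          /* emits example_movs_op16_addr32() */
#endif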
7000
7001
7002/**
7003 * Implements 'XGETBV'.
7004 */
7005IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
7006{
7007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7008 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7009 {
7010 uint32_t uEcx = pCtx->ecx;
7011 switch (uEcx)
7012 {
7013 case 0:
7014 break;
7015
7016 case 1: /** @todo Implement XCR1 support. */
7017 default:
7018 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
7019 return iemRaiseGeneralProtectionFault0(pVCpu);
7020
7021 }
7022 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
7023 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
7024
7025 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7026 return VINF_SUCCESS;
7027 }
7028 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
7029 return iemRaiseUndefinedOpcode(pVCpu);
7030}
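
/*
 * Illustrative note (not part of the emulation): the split modelled above is
 * exactly what guest code undoes after XGETBV, i.e. XCR[ECX] comes back in
 * EDX:EAX and is recombined as in this little sketch.
 */
#if 0 /* example only */
static uint64_t ExampleRecombineXcr(uint32_t uEax, uint32_t uEdx)
{
    /* Mirrors the RT_LO_U32/RT_HI_U32 split done when storing rax/rdx above. */
    return ((uint64_t)uEdx << 32) | uEax;
}
#endif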
7031
7032
7033/**
7034 * Implements 'XSETBV'.
7035 */
7036IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
7037{
7038 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7039 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7040 {
7041 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
7042 {
7043 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
7044 IEM_SVM_UPDATE_NRIP(pVCpu);
7045 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7046 }
7047
7048 if (pVCpu->iem.s.uCpl == 0)
7049 {
7050 uint32_t uEcx = pCtx->ecx;
7051 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
7052 switch (uEcx)
7053 {
7054 case 0:
7055 {
7056 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
7057 if (rc == VINF_SUCCESS)
7058 break;
7059 Assert(rc == VERR_CPUM_RAISE_GP_0);
7060 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7061 return iemRaiseGeneralProtectionFault0(pVCpu);
7062 }
7063
7064 case 1: /** @todo Implement XCR1 support. */
7065 default:
7066 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7067 return iemRaiseGeneralProtectionFault0(pVCpu);
7068
7069 }
7070
7071 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7072 return VINF_SUCCESS;
7073 }
7074
7075 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
7076 return iemRaiseGeneralProtectionFault0(pVCpu);
7077 }
7078 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
7079 return iemRaiseUndefinedOpcode(pVCpu);
7080}
7081
7082#ifdef IN_RING3
7083
7084/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
7085struct IEMCIMPLCX16ARGS
7086{
7087 PRTUINT128U pu128Dst;
7088 PRTUINT128U pu128RaxRdx;
7089 PRTUINT128U pu128RbxRcx;
7090 uint32_t *pEFlags;
7091# ifdef VBOX_STRICT
7092 uint32_t cCalls;
7093# endif
7094};
7095
7096/**
7097 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
7098 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
7099 */
7100static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPU pVCpu, void *pvUser)
7101{
7102 RT_NOREF(pVM, pVCpu);
7103 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
7104# ifdef VBOX_STRICT
7105 Assert(pArgs->cCalls == 0);
7106 pArgs->cCalls++;
7107# endif
7108
7109 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
7110 return VINF_SUCCESS;
7111}
7112
7113#endif /* IN_RING3 */
7114
7115/**
7116 * Implements 'CMPXCHG16B' fallback using rendezvous.
7117 */
7118IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
7119 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
7120{
7121#ifdef IN_RING3
7122 struct IEMCIMPLCX16ARGS Args;
7123 Args.pu128Dst = pu128Dst;
7124 Args.pu128RaxRdx = pu128RaxRdx;
7125 Args.pu128RbxRcx = pu128RbxRcx;
7126 Args.pEFlags = pEFlags;
7127# ifdef VBOX_STRICT
7128 Args.cCalls = 0;
7129# endif
7130 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
7131 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
7132 Assert(Args.cCalls == 1);
7133 if (rcStrict == VINF_SUCCESS)
7134 {
7135 /* Duplicated tail code. */
7136 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
7137 if (rcStrict == VINF_SUCCESS)
7138 {
7139 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7140 pCtx->eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
7141 if (!(*pEFlags & X86_EFL_ZF))
7142 {
7143 pCtx->rax = pu128RaxRdx->s.Lo;
7144 pCtx->rdx = pu128RaxRdx->s.Hi;
7145 }
7146 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7147 }
7148 }
7149 return rcStrict;
7150#else
7151 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
7152 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
7153#endif
7154}
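
/*
 * Why a rendezvous: when the 16-byte compare-exchange cannot be done
 * atomically on the host, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE parks every EMT
 * so no other vCPU can touch guest memory while the plain, non-atomic
 * fallback runs.  A condensed sketch of what such a fallback does (the real
 * iemAImpl_cmpxchg16b_fallback lives elsewhere; the names below are made up
 * for illustration):
 */
#if 0 /* illustration only */
typedef struct EXAMPLEUINT128 { uint64_t Lo, Hi; } EXAMPLEUINT128;
static void ExampleCmpXchg16bFallback(EXAMPLEUINT128 *pu128Dst, EXAMPLEUINT128 *pu128RaxRdx,
                                      EXAMPLEUINT128 const *pu128RbxRcx, uint32_t *pfEFlags)
{
    if (   pu128Dst->Lo == pu128RaxRdx->Lo
        && pu128Dst->Hi == pu128RaxRdx->Hi)
    {
        *pu128Dst = *pu128RbxRcx;       /* equal: write RCX:RBX to memory */
        *pfEFlags |= X86_EFL_ZF;        /* ZF=1 signals success */
    }
    else
    {
        *pu128RaxRdx = *pu128Dst;       /* not equal: report the current value */
        *pfEFlags &= ~X86_EFL_ZF;       /* ZF=0; the tail code above then updates rax/rdx */
    }
}
#endif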
7155
7156
7157/**
7158 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
7159 *
7160 * This is implemented in C because it triggers load-like behaviour without
7161 * actually reading anything. Since that's not so common, it's implemented
7162 * here.
7163 *
7164 * @param iEffSeg The effective segment.
7165 * @param GCPtrEff The address of the image.
7166 */
7167IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7168{
7169 /*
7170 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
7171 */
7172 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
7173 if (rcStrict == VINF_SUCCESS)
7174 {
7175 RTGCPHYS GCPhysMem;
7176 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7177 if (rcStrict == VINF_SUCCESS)
7178 {
7179 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7180 return VINF_SUCCESS;
7181 }
7182 }
7183
7184 return rcStrict;
7185}
7186
7187
7188/**
7189 * Implements 'FINIT' and 'FNINIT'.
7190 *
7191 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
7192 * not.
7193 */
7194IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
7195{
7196 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7197
7198 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
7199 return iemRaiseDeviceNotAvailable(pVCpu);
7200
7201 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
7202 if (fCheckXcpts && TODO )
7203 return iemRaiseMathFault(pVCpu);
7204 */
7205
7206 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
7207 pXState->x87.FCW = 0x37f;
7208 pXState->x87.FSW = 0;
7209 pXState->x87.FTW = 0x00; /* 0 - empty. */
7210 pXState->x87.FPUDP = 0;
7211 pXState->x87.DS = 0; //??
7212 pXState->x87.Rsrvd2= 0;
7213 pXState->x87.FPUIP = 0;
7214 pXState->x87.CS = 0; //??
7215 pXState->x87.Rsrvd1= 0;
7216 pXState->x87.FOP = 0;
7217
7218 iemHlpUsedFpu(pVCpu);
7219 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7220 return VINF_SUCCESS;
7221}
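
/*
 * Decoding the reset values used above, for reference: FCW=0x037f is RC=00
 * (round to nearest, bits 11:10), PC=11 (64-bit precision, bits 9:8), all
 * six exception mask bits IM..PM set (bits 5:0) plus the reserved bit 6,
 * i.e. 0x0300 | 0x007f; FSW=0 clears TOP, busy and every exception flag;
 * and the abridged FTW=0 marks all eight registers as empty.
 */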
7222
7223
7224/**
7225 * Implements 'FXSAVE'.
7226 *
7227 * @param iEffSeg The effective segment.
7228 * @param GCPtrEff The address of the image.
7229 * @param enmEffOpSize The operand size (only REX.W really matters).
7230 */
7231IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7232{
7233 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7234
7235 /*
7236 * Raise exceptions.
7237 */
7238 if (pCtx->cr0 & X86_CR0_EM)
7239 return iemRaiseUndefinedOpcode(pVCpu);
7240 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7241 return iemRaiseDeviceNotAvailable(pVCpu);
7242 if (GCPtrEff & 15)
7243 {
7244 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7245 * all/any misalignment sizes; Intel says it's an implementation detail. */
7246 if ( (pCtx->cr0 & X86_CR0_AM)
7247 && pCtx->eflags.Bits.u1AC
7248 && pVCpu->iem.s.uCpl == 3)
7249 return iemRaiseAlignmentCheckException(pVCpu);
7250 return iemRaiseGeneralProtectionFault0(pVCpu);
7251 }
7252
7253 /*
7254 * Access the memory.
7255 */
7256 void *pvMem512;
7257 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7258 if (rcStrict != VINF_SUCCESS)
7259 return rcStrict;
7260 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7261 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7262
7263 /*
7264 * Store the registers.
7265 */
7266 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
7267 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
7268
7269 /* common for all formats */
7270 pDst->FCW = pSrc->FCW;
7271 pDst->FSW = pSrc->FSW;
7272 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7273 pDst->FOP = pSrc->FOP;
7274 pDst->MXCSR = pSrc->MXCSR;
7275 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7276 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7277 {
7278 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7279 * them for now... */
7280 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7281 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7282 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7283 pDst->aRegs[i].au32[3] = 0;
7284 }
7285
7286 /* FPU IP, CS, DP and DS. */
7287 pDst->FPUIP = pSrc->FPUIP;
7288 pDst->CS = pSrc->CS;
7289 pDst->FPUDP = pSrc->FPUDP;
7290 pDst->DS = pSrc->DS;
7291 if (enmEffOpSize == IEMMODE_64BIT)
7292 {
7293 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7294 pDst->Rsrvd1 = pSrc->Rsrvd1;
7295 pDst->Rsrvd2 = pSrc->Rsrvd2;
7296 pDst->au32RsrvdForSoftware[0] = 0;
7297 }
7298 else
7299 {
7300 pDst->Rsrvd1 = 0;
7301 pDst->Rsrvd2 = 0;
7302 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7303 }
7304
7305 /* XMM registers. */
7306 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7307 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7308 || pVCpu->iem.s.uCpl != 0)
7309 {
7310 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7311 for (uint32_t i = 0; i < cXmmRegs; i++)
7312 pDst->aXMM[i] = pSrc->aXMM[i];
7313 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7314 * right? */
7315 }
7316
7317 /*
7318 * Commit the memory.
7319 */
7320 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7321 if (rcStrict != VINF_SUCCESS)
7322 return rcStrict;
7323
7324 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7325 return VINF_SUCCESS;
7326}
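
/*
 * For reference, the 512-byte image filled in above follows the
 * architectural FXSAVE layout: FCW at offset 0, FSW at 2, abridged FTW at 4,
 * FOP at 6, FPUIP/CS (or the 64-bit instruction pointer) at 8, FPUDP/DS (or
 * the 64-bit data pointer) at 16, MXCSR at 24, MXCSR_MASK at 28, ST0-ST7 in
 * 16-byte slots starting at 32, XMM0-XMM15 in 16-byte slots starting at 160,
 * and reserved/software-available space up to byte 511.
 */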
7327
7328
7329/**
7330 * Implements 'FXRSTOR'.
7331 *
7332 * @param GCPtrEff The address of the image.
7333 * @param enmEffOpSize The operand size (only REX.W really matters).
7334 */
7335IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7336{
7337 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7338
7339 /*
7340 * Raise exceptions.
7341 */
7342 if (pCtx->cr0 & X86_CR0_EM)
7343 return iemRaiseUndefinedOpcode(pVCpu);
7344 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7345 return iemRaiseDeviceNotAvailable(pVCpu);
7346 if (GCPtrEff & 15)
7347 {
7348 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7349 * all/any misalignment sizes; Intel says it's an implementation detail. */
7350 if ( (pCtx->cr0 & X86_CR0_AM)
7351 && pCtx->eflags.Bits.u1AC
7352 && pVCpu->iem.s.uCpl == 3)
7353 return iemRaiseAlignmentCheckException(pVCpu);
7354 return iemRaiseGeneralProtectionFault0(pVCpu);
7355 }
7356
7357 /*
7358 * Access the memory.
7359 */
7360 void *pvMem512;
7361 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7362 if (rcStrict != VINF_SUCCESS)
7363 return rcStrict;
7364 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7365 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7366
7367 /*
7368 * Check the state for stuff which will #GP(0).
7369 */
7370 uint32_t const fMXCSR = pSrc->MXCSR;
7371 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7372 if (fMXCSR & ~fMXCSR_MASK)
7373 {
7374 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
7375 return iemRaiseGeneralProtectionFault0(pVCpu);
7376 }
7377
7378 /*
7379 * Load the registers.
7380 */
7381 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's
7382 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
7383
7384 /* common for all formats */
7385 pDst->FCW = pSrc->FCW;
7386 pDst->FSW = pSrc->FSW;
7387 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7388 pDst->FOP = pSrc->FOP;
7389 pDst->MXCSR = fMXCSR;
7390 /* (MXCSR_MASK is read-only) */
7391 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7392 {
7393 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7394 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7395 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7396 pDst->aRegs[i].au32[3] = 0;
7397 }
7398
7399 /* FPU IP, CS, DP and DS. */
7400 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7401 {
7402 pDst->FPUIP = pSrc->FPUIP;
7403 pDst->CS = pSrc->CS;
7404 pDst->Rsrvd1 = pSrc->Rsrvd1;
7405 pDst->FPUDP = pSrc->FPUDP;
7406 pDst->DS = pSrc->DS;
7407 pDst->Rsrvd2 = pSrc->Rsrvd2;
7408 }
7409 else
7410 {
7411 pDst->FPUIP = pSrc->FPUIP;
7412 pDst->CS = pSrc->CS;
7413 pDst->Rsrvd1 = 0;
7414 pDst->FPUDP = pSrc->FPUDP;
7415 pDst->DS = pSrc->DS;
7416 pDst->Rsrvd2 = 0;
7417 }
7418
7419 /* XMM registers. */
7420 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7421 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7422 || pVCpu->iem.s.uCpl != 0)
7423 {
7424 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7425 for (uint32_t i = 0; i < cXmmRegs; i++)
7426 pDst->aXMM[i] = pSrc->aXMM[i];
7427 }
7428
7429 /*
7430 * Commit the memory.
7431 */
7432 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7433 if (rcStrict != VINF_SUCCESS)
7434 return rcStrict;
7435
7436 iemHlpUsedFpu(pVCpu);
7437 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7438 return VINF_SUCCESS;
7439}
7440
7441
7442/**
7443 * Implements 'XSAVE'.
7444 *
7445 * @param iEffSeg The effective segment.
7446 * @param GCPtrEff The address of the image.
7447 * @param enmEffOpSize The operand size (only REX.W really matters).
7448 */
7449IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7450{
7451 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7452
7453 /*
7454 * Raise exceptions.
7455 */
7456 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7457 return iemRaiseUndefinedOpcode(pVCpu);
7458 if (pCtx->cr0 & X86_CR0_TS)
7459 return iemRaiseDeviceNotAvailable(pVCpu);
7460 if (GCPtrEff & 63)
7461 {
7462 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7463 * all/any misalignment sizes; Intel says it's an implementation detail. */
7464 if ( (pCtx->cr0 & X86_CR0_AM)
7465 && pCtx->eflags.Bits.u1AC
7466 && pVCpu->iem.s.uCpl == 3)
7467 return iemRaiseAlignmentCheckException(pVCpu);
7468 return iemRaiseGeneralProtectionFault0(pVCpu);
7469 }
7470
7471 /*
7472 * Calc the requested mask
7473 */
7474 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7475 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7476 uint64_t const fXInUse = pCtx->aXcr[0];
7477
7478/** @todo figure out the exact protocol for the memory access. Currently we
7479 * just need this crap to work halfways to make it possible to test
7480 * AVX instructions. */
7481/** @todo figure out the XINUSE and XMODIFIED */
7482
7483 /*
7484 * Access the x87 memory state.
7485 */
7486 /* The x87+SSE state. */
7487 void *pvMem512;
7488 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7489 if (rcStrict != VINF_SUCCESS)
7490 return rcStrict;
7491 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7492 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7493
7494 /* The header. */
7495 PX86XSAVEHDR pHdr;
7496 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
7497 if (rcStrict != VINF_SUCCESS)
7498 return rcStrict;
7499
7500 /*
7501 * Store the X87 state.
7502 */
7503 if (fReqComponents & XSAVE_C_X87)
7504 {
7505 /* common for all formats */
7506 pDst->FCW = pSrc->FCW;
7507 pDst->FSW = pSrc->FSW;
7508 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7509 pDst->FOP = pSrc->FOP;
7510 pDst->FPUIP = pSrc->FPUIP;
7511 pDst->CS = pSrc->CS;
7512 pDst->FPUDP = pSrc->FPUDP;
7513 pDst->DS = pSrc->DS;
7514 if (enmEffOpSize == IEMMODE_64BIT)
7515 {
7516 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7517 pDst->Rsrvd1 = pSrc->Rsrvd1;
7518 pDst->Rsrvd2 = pSrc->Rsrvd2;
7519 pDst->au32RsrvdForSoftware[0] = 0;
7520 }
7521 else
7522 {
7523 pDst->Rsrvd1 = 0;
7524 pDst->Rsrvd2 = 0;
7525 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7526 }
7527 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7528 {
7529 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7530 * them for now... */
7531 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7532 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7533 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7534 pDst->aRegs[i].au32[3] = 0;
7535 }
7536
7537 }
7538
7539 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7540 {
7541 pDst->MXCSR = pSrc->MXCSR;
7542 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7543 }
7544
7545 if (fReqComponents & XSAVE_C_SSE)
7546 {
7547 /* XMM registers. */
7548 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7549 for (uint32_t i = 0; i < cXmmRegs; i++)
7550 pDst->aXMM[i] = pSrc->aXMM[i];
7551 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7552 * right? */
7553 }
7554
7555 /* Commit the x87 state bits. (probably wrong) */
7556 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7557 if (rcStrict != VINF_SUCCESS)
7558 return rcStrict;
7559
7560 /*
7561 * Store AVX state.
7562 */
7563 if (fReqComponents & XSAVE_C_YMM)
7564 {
7565 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7566 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7567 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
7568 PX86XSAVEYMMHI pCompDst;
7569 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
7570 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7571 if (rcStrict != VINF_SUCCESS)
7572 return rcStrict;
7573
7574 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7575 for (uint32_t i = 0; i < cXmmRegs; i++)
7576 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
7577
7578 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7579 if (rcStrict != VINF_SUCCESS)
7580 return rcStrict;
7581 }
7582
7583 /*
7584 * Update the header.
7585 */
7586 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
7587 | (fReqComponents & fXInUse);
7588
7589 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
7590 if (rcStrict != VINF_SUCCESS)
7591 return rcStrict;
7592
7593 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7594 return VINF_SUCCESS;
7595}
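
/*
 * Sketch of the image produced above, for reference: bytes 0-511 are the
 * legacy FXSAVE-style region, the 64-byte XSAVE header starts at offset 512
 * (XSTATE_BV first, XCOMP_BV at 520), and each extended component sits at
 * the offset CPUID leaf 0Dh advertises - the YMM_Hi128 component commonly
 * at byte 576.  The header update keeps the bits outside the requested mask,
 * e.g. with bmXState=0x3, fReqComponents=0x6 and fXInUse=0x7 the new value
 * is (0x3 & ~0x6) | (0x6 & 0x7) = 0x7.
 */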
7596
7597
7598/**
7599 * Implements 'XRSTOR'.
7600 *
7601 * @param iEffSeg The effective segment.
7602 * @param GCPtrEff The address of the image.
7603 * @param enmEffOpSize The operand size (only REX.W really matters).
7604 */
7605IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7606{
7607 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7608
7609 /*
7610 * Raise exceptions.
7611 */
7612 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7613 return iemRaiseUndefinedOpcode(pVCpu);
7614 if (pCtx->cr0 & X86_CR0_TS)
7615 return iemRaiseDeviceNotAvailable(pVCpu);
7616 if (GCPtrEff & 63)
7617 {
7618 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7619 * all/any misalignment sizes; Intel says it's an implementation detail. */
7620 if ( (pCtx->cr0 & X86_CR0_AM)
7621 && pCtx->eflags.Bits.u1AC
7622 && pVCpu->iem.s.uCpl == 3)
7623 return iemRaiseAlignmentCheckException(pVCpu);
7624 return iemRaiseGeneralProtectionFault0(pVCpu);
7625 }
7626
7627/** @todo figure out the exact protocol for the memory access. Currently we
7628 * just need this crap to work halfways to make it possible to test
7629 * AVX instructions. */
7630/** @todo figure out the XINUSE and XMODIFIED */
7631
7632 /*
7633 * Access the x87 memory state.
7634 */
7635 /* The x87+SSE state. */
7636 void *pvMem512;
7637 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7638 if (rcStrict != VINF_SUCCESS)
7639 return rcStrict;
7640 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7641 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7642
7643 /*
7644 * Calc the requested mask
7645 */
7646 PX86XSAVEHDR pHdrDst = &pCtx->CTX_SUFF(pXState)->Hdr;
7647 PCX86XSAVEHDR pHdrSrc;
7648 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
7649 if (rcStrict != VINF_SUCCESS)
7650 return rcStrict;
7651
7652 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7653 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7654 //uint64_t const fXInUse = pCtx->aXcr[0];
7655 uint64_t const fRstorMask = pHdrSrc->bmXState;
7656 uint64_t const fCompMask = pHdrSrc->bmXComp;
7657
7658 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7659
7660 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7661
7662 /* We won't need this any longer. */
7663 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
7664 if (rcStrict != VINF_SUCCESS)
7665 return rcStrict;
7666
7667 /*
7668 * Load the X87 state.
7669 */
7670 if (fReqComponents & XSAVE_C_X87)
7671 {
7672 if (fRstorMask & XSAVE_C_X87)
7673 {
7674 pDst->FCW = pSrc->FCW;
7675 pDst->FSW = pSrc->FSW;
7676 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7677 pDst->FOP = pSrc->FOP;
7678 pDst->FPUIP = pSrc->FPUIP;
7679 pDst->CS = pSrc->CS;
7680 pDst->FPUDP = pSrc->FPUDP;
7681 pDst->DS = pSrc->DS;
7682 if (enmEffOpSize == IEMMODE_64BIT)
7683 {
7684 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7685 pDst->Rsrvd1 = pSrc->Rsrvd1;
7686 pDst->Rsrvd2 = pSrc->Rsrvd2;
7687 }
7688 else
7689 {
7690 pDst->Rsrvd1 = 0;
7691 pDst->Rsrvd2 = 0;
7692 }
7693 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7694 {
7695 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7696 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7697 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7698 pDst->aRegs[i].au32[3] = 0;
7699 }
7700 }
7701 else
7702 {
7703 pDst->FCW = 0x37f;
7704 pDst->FSW = 0;
7705 pDst->FTW = 0x00; /* 0 - empty. */
7706 pDst->FPUDP = 0;
7707 pDst->DS = 0; //??
7708 pDst->Rsrvd2= 0;
7709 pDst->FPUIP = 0;
7710 pDst->CS = 0; //??
7711 pDst->Rsrvd1= 0;
7712 pDst->FOP = 0;
7713 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7714 {
7715 pDst->aRegs[i].au32[0] = 0;
7716 pDst->aRegs[i].au32[1] = 0;
7717 pDst->aRegs[i].au32[2] = 0;
7718 pDst->aRegs[i].au32[3] = 0;
7719 }
7720 }
7721 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
7722 }
7723
7724 /* MXCSR */
7725 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7726 {
7727 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
7728 pDst->MXCSR = pSrc->MXCSR;
7729 else
7730 pDst->MXCSR = 0x1f80;
7731 }
7732
7733 /* XMM registers. */
7734 if (fReqComponents & XSAVE_C_SSE)
7735 {
7736 if (fRstorMask & XSAVE_C_SSE)
7737 {
7738 for (uint32_t i = 0; i < cXmmRegs; i++)
7739 pDst->aXMM[i] = pSrc->aXMM[i];
7740 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7741 * right? */
7742 }
7743 else
7744 {
7745 for (uint32_t i = 0; i < cXmmRegs; i++)
7746 {
7747 pDst->aXMM[i].au64[0] = 0;
7748 pDst->aXMM[i].au64[1] = 0;
7749 }
7750 }
7751 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
7752 }
7753
7754 /* Unmap the x87 state bits (so we don't run out of mappings). */
7755 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7756 if (rcStrict != VINF_SUCCESS)
7757 return rcStrict;
7758
7759 /*
7760 * Restore AVX state.
7761 */
7762 if (fReqComponents & XSAVE_C_YMM)
7763 {
7764 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7765 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
7766
7767 if (fRstorMask & XSAVE_C_YMM)
7768 {
7769 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7770 PCX86XSAVEYMMHI pCompSrc;
7771 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
7772 iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
7773 if (rcStrict != VINF_SUCCESS)
7774 return rcStrict;
7775
7776 for (uint32_t i = 0; i < cXmmRegs; i++)
7777 {
7778 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
7779 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
7780 }
7781
7782 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
7783 if (rcStrict != VINF_SUCCESS)
7784 return rcStrict;
7785 }
7786 else
7787 {
7788 for (uint32_t i = 0; i < cXmmRegs; i++)
7789 {
7790 pCompDst->aYmmHi[i].au64[0] = 0;
7791 pCompDst->aYmmHi[i].au64[1] = 0;
7792 }
7793 }
7794 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
7795 }
7796
7797 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7798 return VINF_SUCCESS;
7799}
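
/*
 * Condensed sketch of the per-component policy implemented above: a
 * component is only touched when requested via EDX:EAX & XCR0, and is then
 * either loaded from the image or reset to its init state depending on the
 * image's XSTATE_BV bit (illustrative pseudo-helper, not the real code):
 */
#if 0 /* illustration only */
static void ExampleXrstorOneComponent(uint64_t fReqComponents, uint64_t fRstorMask, unsigned iComponent,
                                      void (*pfnLoadFromImage)(unsigned), void (*pfnInitToDefault)(unsigned))
{
    if (fReqComponents & RT_BIT_64(iComponent))
    {
        if (fRstorMask & RT_BIT_64(iComponent))
            pfnLoadFromImage(iComponent);   /* e.g. copy aXMM / aYmmHi from the image */
        else
            pfnInitToDefault(iComponent);   /* e.g. MXCSR -> 0x1f80, registers -> 0 */
    }
    /* else: the component is left untouched. */
}
#endif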
7800
7801
7802
7803
7804/**
7805 * Implements 'STMXCSR'.
7806 *
7807 * @param GCPtrEff The address of the image.
7808 */
7809IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7810{
7811 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7812
7813 /*
7814 * Raise exceptions.
7815 */
7816 if ( !(pCtx->cr0 & X86_CR0_EM)
7817 && (pCtx->cr4 & X86_CR4_OSFXSR))
7818 {
7819 if (!(pCtx->cr0 & X86_CR0_TS))
7820 {
7821 /*
7822 * Do the job.
7823 */
7824 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7825 if (rcStrict == VINF_SUCCESS)
7826 {
7827 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7828 return VINF_SUCCESS;
7829 }
7830 return rcStrict;
7831 }
7832 return iemRaiseDeviceNotAvailable(pVCpu);
7833 }
7834 return iemRaiseUndefinedOpcode(pVCpu);
7835}
7836
7837
7838/**
7839 * Implements 'VSTMXCSR'.
7840 *
7841 * @param GCPtrEff The address of the image.
7842 */
7843IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7844{
7845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7846
7847 /*
7848 * Raise exceptions.
7849 */
7850 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
7851 ? (pCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
7852 : !(pCtx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
7853 && (pCtx->cr4 & X86_CR4_OSXSAVE))
7854 {
7855 if (!(pCtx->cr0 & X86_CR0_TS))
7856 {
7857 /*
7858 * Do the job.
7859 */
7860 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7861 if (rcStrict == VINF_SUCCESS)
7862 {
7863 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7864 return VINF_SUCCESS;
7865 }
7866 return rcStrict;
7867 }
7868 return iemRaiseDeviceNotAvailable(pVCpu);
7869 }
7870 return iemRaiseUndefinedOpcode(pVCpu);
7871}
7872
7873
7874/**
7875 * Implements 'LDMXCSR'.
7876 *
7877 * @param GCPtrEff The address of the image.
7878 */
7879IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7880{
7881 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7882
7883 /*
7884 * Raise exceptions.
7885 */
7886 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
7887 * happen after or before \#UD and \#EM? */
7888 if ( !(pCtx->cr0 & X86_CR0_EM)
7889 && (pCtx->cr4 & X86_CR4_OSFXSR))
7890 {
7891 if (!(pCtx->cr0 & X86_CR0_TS))
7892 {
7893 /*
7894 * Do the job.
7895 */
7896 uint32_t fNewMxCsr;
7897 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
7898 if (rcStrict == VINF_SUCCESS)
7899 {
7900 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7901 if (!(fNewMxCsr & ~fMxCsrMask))
7902 {
7903 pCtx->CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr;
7904 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7905 return VINF_SUCCESS;
7906 }
7907 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
7908 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
7909 return iemRaiseGeneralProtectionFault0(pVCpu);
7910 }
7911 return rcStrict;
7912 }
7913 return iemRaiseDeviceNotAvailable(pVCpu);
7914 }
7915 return iemRaiseUndefinedOpcode(pVCpu);
7916}
7917
7918
7919/**
7920 * Common routine for fnstenv and fnsave.
7921 *
7922 * @param uPtr Where to store the state.
7923 * @param pCtx The CPU context.
7924 */
7925static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
7926{
7927 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
7928 if (enmEffOpSize == IEMMODE_16BIT)
7929 {
7930 uPtr.pu16[0] = pSrcX87->FCW;
7931 uPtr.pu16[1] = pSrcX87->FSW;
7932 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
7933 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7934 {
7935 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
7936 * protected mode or long mode and we save it in real mode? And vice
7937 * versa? And with 32-bit operand size? I think the CPU is storing the
7938 * effective address ((CS << 4) + IP) in the offset register and not
7939 * doing any address calculations here. */
7940 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
7941 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
7942 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
7943 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
7944 }
7945 else
7946 {
7947 uPtr.pu16[3] = pSrcX87->FPUIP;
7948 uPtr.pu16[4] = pSrcX87->CS;
7949 uPtr.pu16[5] = pSrcX87->FPUDP;
7950 uPtr.pu16[6] = pSrcX87->DS;
7951 }
7952 }
7953 else
7954 {
7955 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
7956 uPtr.pu16[0*2] = pSrcX87->FCW;
7957 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
7958 uPtr.pu16[1*2] = pSrcX87->FSW;
7959 uPtr.pu16[1*2+1] = 0xffff;
7960 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
7961 uPtr.pu16[2*2+1] = 0xffff;
7962 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7963 {
7964 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
7965 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
7966 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
7967 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
7968 }
7969 else
7970 {
7971 uPtr.pu32[3] = pSrcX87->FPUIP;
7972 uPtr.pu16[4*2] = pSrcX87->CS;
7973 uPtr.pu16[4*2+1] = pSrcX87->FOP;
7974 uPtr.pu32[5] = pSrcX87->FPUDP;
7975 uPtr.pu16[6*2] = pSrcX87->DS;
7976 uPtr.pu16[6*2+1] = 0xffff;
7977 }
7978 }
7979}
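
/*
 * Worked example for the real/V86-mode 16-bit encoding above: with CS=0x1234
 * and IP=0x0010 the FPUIP field holds the linear address (CS << 4) + IP =
 * 0x12350, so word 3 receives the low 16 bits (0x2350) while word 4 carries
 * bits 19:16 in its top nibble (0x1000) merged with the 11-bit FOP.
 */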
7980
7981
7982/**
7983 * Common routine for fldenv and frstor.
7984 *
7985 * @param uPtr Where to load the state from.
7986 * @param pCtx The CPU context.
7987 */
7988static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
7989{
7990 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
7991 if (enmEffOpSize == IEMMODE_16BIT)
7992 {
7993 pDstX87->FCW = uPtr.pu16[0];
7994 pDstX87->FSW = uPtr.pu16[1];
7995 pDstX87->FTW = uPtr.pu16[2];
7996 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7997 {
7998 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
7999 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
8000 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
8001 pDstX87->CS = 0;
8002 pDstX87->Rsrvd1= 0;
8003 pDstX87->DS = 0;
8004 pDstX87->Rsrvd2= 0;
8005 }
8006 else
8007 {
8008 pDstX87->FPUIP = uPtr.pu16[3];
8009 pDstX87->CS = uPtr.pu16[4];
8010 pDstX87->Rsrvd1= 0;
8011 pDstX87->FPUDP = uPtr.pu16[5];
8012 pDstX87->DS = uPtr.pu16[6];
8013 pDstX87->Rsrvd2= 0;
8014 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
8015 }
8016 }
8017 else
8018 {
8019 pDstX87->FCW = uPtr.pu16[0*2];
8020 pDstX87->FSW = uPtr.pu16[1*2];
8021 pDstX87->FTW = uPtr.pu16[2*2];
8022 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8023 {
8024 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
8025 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
8026 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
8027 pDstX87->CS = 0;
8028 pDstX87->Rsrvd1= 0;
8029 pDstX87->DS = 0;
8030 pDstX87->Rsrvd2= 0;
8031 }
8032 else
8033 {
8034 pDstX87->FPUIP = uPtr.pu32[3];
8035 pDstX87->CS = uPtr.pu16[4*2];
8036 pDstX87->Rsrvd1= 0;
8037 pDstX87->FOP = uPtr.pu16[4*2+1];
8038 pDstX87->FPUDP = uPtr.pu32[5];
8039 pDstX87->DS = uPtr.pu16[6*2];
8040 pDstX87->Rsrvd2= 0;
8041 }
8042 }
8043
8044 /* Make adjustments. */
8045 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
8046 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
8047 iemFpuRecalcExceptionStatus(pDstX87);
8048 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
8049 * exceptions are pending after loading the saved state? */
8050}
8051
8052
8053/**
8054 * Implements 'FNSTENV'.
8055 *
8056 * @param enmEffOpSize The operand size (only REX.W really matters).
8057 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
8058 * @param GCPtrEffDst The address of the image.
8059 */
8060IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8061{
8062 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8063 RTPTRUNION uPtr;
8064 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8065 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8066 if (rcStrict != VINF_SUCCESS)
8067 return rcStrict;
8068
8069 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8070
8071 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8072 if (rcStrict != VINF_SUCCESS)
8073 return rcStrict;
8074
8075 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8076 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8077 return VINF_SUCCESS;
8078}
8079
8080
8081/**
8082 * Implements 'FNSAVE'.
8083 *
8084 * @param GCPtrEffDst The address of the image.
8085 * @param enmEffOpSize The operand size.
8086 */
8087IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8088{
8089 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8090 RTPTRUNION uPtr;
8091 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8092 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8093 if (rcStrict != VINF_SUCCESS)
8094 return rcStrict;
8095
8096 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8097 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8098 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8099 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8100 {
8101 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
8102 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
8103 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
8104 }
8105
8106 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8107 if (rcStrict != VINF_SUCCESS)
8108 return rcStrict;
8109
8110 /*
8111 * Re-initialize the FPU context.
8112 */
8113 pFpuCtx->FCW = 0x37f;
8114 pFpuCtx->FSW = 0;
8115 pFpuCtx->FTW = 0x00; /* 0 - empty */
8116 pFpuCtx->FPUDP = 0;
8117 pFpuCtx->DS = 0;
8118 pFpuCtx->Rsrvd2= 0;
8119 pFpuCtx->FPUIP = 0;
8120 pFpuCtx->CS = 0;
8121 pFpuCtx->Rsrvd1= 0;
8122 pFpuCtx->FOP = 0;
8123
8124 iemHlpUsedFpu(pVCpu);
8125 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8126 return VINF_SUCCESS;
8127}
8128
8129
8130
8131/**
8132 * Implements 'FLDENV'.
8133 *
8134 * @param enmEffOpSize The operand size (only REX.W really matters).
8135 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
8136 * @param GCPtrEffSrc The address of the image.
8137 */
8138IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8139{
8140 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8141 RTCPTRUNION uPtr;
8142 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8143 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8144 if (rcStrict != VINF_SUCCESS)
8145 return rcStrict;
8146
8147 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8148
8149 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8150 if (rcStrict != VINF_SUCCESS)
8151 return rcStrict;
8152
8153 iemHlpUsedFpu(pVCpu);
8154 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8155 return VINF_SUCCESS;
8156}
8157
8158
8159/**
8160 * Implements 'FRSTOR'.
8161 *
8162 * @param GCPtrEffSrc The address of the image.
8163 * @param enmEffOpSize The operand size.
8164 */
8165IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8166{
8167 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8168 RTCPTRUNION uPtr;
8169 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8170 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8171 if (rcStrict != VINF_SUCCESS)
8172 return rcStrict;
8173
8174 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8175 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8176 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8177 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8178 {
8179 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
8180 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
8181 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
8182 pFpuCtx->aRegs[i].au32[3] = 0;
8183 }
8184
8185 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8186 if (rcStrict != VINF_SUCCESS)
8187 return rcStrict;
8188
8189 iemHlpUsedFpu(pVCpu);
8190 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8191 return VINF_SUCCESS;
8192}
8193
8194
8195/**
8196 * Implements 'FLDCW'.
8197 *
8198 * @param u16Fcw The new FCW.
8199 */
8200IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
8201{
8202 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8203
8204 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
8205 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
8206 /** @todo Testcase: Try to see what happens when trying to set undefined bits
8207 * (other than 6 and 7). Currently ignoring them. */
8208 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
 * according to FSW. (This is what is currently implemented.) */
8209 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8210 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
8211 iemFpuRecalcExceptionStatus(pFpuCtx);
8212
8213 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8214 iemHlpUsedFpu(pVCpu);
8215 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8216 return VINF_SUCCESS;
8217}
8218
8219
8220
8221/**
8222 * Implements the underflow case of fxch.
8223 *
8224 * @param iStReg The other stack register.
8225 */
8226IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
8227{
8228 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8229
8230 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8231 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
8232 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8233 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
8234
8235 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
8236 * registers are read as QNaN and then exchanged. This could be
8237 * wrong... */
8238 if (pFpuCtx->FCW & X86_FCW_IM)
8239 {
8240 if (RT_BIT(iReg1) & pFpuCtx->FTW)
8241 {
8242 if (RT_BIT(iReg2) & pFpuCtx->FTW)
8243 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8244 else
8245 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
8246 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
8247 }
8248 else
8249 {
8250 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
8251 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8252 }
8253 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8254 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8255 }
8256 else
8257 {
8258 /* raise underflow exception, don't change anything. */
8259 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
8260 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8261 }
8262
8263 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8264 iemHlpUsedFpu(pVCpu);
8265 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8266 return VINF_SUCCESS;
8267}
8268
8269
8270/**
8271 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
8272 *
8273 * @param iStReg The other stack register.
 * @param pfnAImpl The FPU comparison worker to invoke.
 * @param fPop Whether to pop the stack register afterwards.
8274 */
8275IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
8276{
8277 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8278 Assert(iStReg < 8);
8279
8280 /*
8281 * Raise exceptions.
8282 */
8283 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
8284 return iemRaiseDeviceNotAvailable(pVCpu);
8285
8286 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8287 uint16_t u16Fsw = pFpuCtx->FSW;
8288 if (u16Fsw & X86_FSW_ES)
8289 return iemRaiseMathFault(pVCpu);
8290
8291 /*
8292 * Check if any of the register accesses causes #SF + #IA.
8293 */
8294 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
8295 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8296 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
8297 {
8298 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
8300
8301 pFpuCtx->FSW &= ~X86_FSW_C1;
8302 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
8303 if ( !(u16Fsw & X86_FSW_IE)
8304 || (pFpuCtx->FCW & X86_FCW_IM) )
8305 {
8306 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8307 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8308 }
8309 }
8310 else if (pFpuCtx->FCW & X86_FCW_IM)
8311 {
8312 /* Masked underflow. */
8313 pFpuCtx->FSW &= ~X86_FSW_C1;
8314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8315 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8316 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
8317 }
8318 else
8319 {
8320 /* Raise underflow - don't touch EFLAGS or TOP. */
8321 pFpuCtx->FSW &= ~X86_FSW_C1;
8322 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8323 fPop = false;
8324 }
8325
8326 /*
8327 * Pop if necessary.
8328 */
8329 if (fPop)
8330 {
8331 pFpuCtx->FTW &= ~RT_BIT(iReg1);
8332 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
8333 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
8334 }
8335
8336 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8337 iemHlpUsedFpu(pVCpu);
8338 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8339 return VINF_SUCCESS;
8340}
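
/*
 * For reference, the EFLAGS outcome follows the architectural mapping:
 * ST0 > ST(i) -> ZF=0, PF=0, CF=0; ST0 < ST(i) -> ZF=0, PF=0, CF=1;
 * ST0 == ST(i) -> ZF=1, PF=0, CF=0; unordered (a NaN involved) -> ZF=PF=CF=1,
 * which is also what the masked-underflow path above forces.
 */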
8341
8342/** @} */
8343