VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 72262

Last change on this file since 72262 was 72209, checked in by vboxsync on 2018-05-15

VMM/IEM: VBOX_WITH_NESTED_HWVIRT_SVM.

1/* $Id: IEMAllCImpl.cpp.h 72209 2018-05-15 04:12:25Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
19# include "IEMAllCImplSvmInstr.cpp.h"
20#endif
21
22/** @name Misc Helpers
23 * @{
24 */
25
26
27/**
28 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
29 *
30 * @returns Strict VBox status code.
31 *
32 * @param pVCpu The cross context virtual CPU structure of the calling thread.
33 * @param pCtx The register context.
34 * @param u16Port The port number.
35 * @param cbOperand The operand size.
36 */
37static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
38{
39 /* The TSS bits we're interested in are the same on 386 and AMD64. */
40 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
41 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
42 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
43 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
44
45 /*
46 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
47 */
48 Assert(!pCtx->tr.Attr.n.u1DescType);
49 if (RT_UNLIKELY( pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
50 && pCtx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
51 {
52 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
53 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));
54 return iemRaiseGeneralProtectionFault0(pVCpu);
55 }
56
57 /*
58 * Read the bitmap offset (may #PF).
59 */
60 uint16_t offBitmap;
61 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
62 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));
63 if (rcStrict != VINF_SUCCESS)
64 {
65 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
66 return rcStrict;
67 }
68
69 /*
70 * We need the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
71 * describes the CPU as actually reading two bytes regardless of whether the
72 * bit range crosses a byte boundary. Thus the + 1 in the test below.
73 */
74 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
75 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
76 * for instance, sizeof(X86TSS32). */
77 if (offFirstBit + 1 > pCtx->tr.u32Limit) /* the limit is inclusive */
78 {
79 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
80 offFirstBit, pCtx->tr.u32Limit));
81 return iemRaiseGeneralProtectionFault0(pVCpu);
82 }
83
84 /*
85 * Read the necessary bits.
86 */
87 /** @todo Test the assertion in the Intel manual that the CPU reads two
88 * bytes. The question is how this works wrt #PF and #GP on the
89 * 2nd byte when it's not required. */
90 uint16_t bmBytes = UINT16_MAX;
91 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);
92 if (rcStrict != VINF_SUCCESS)
93 {
94 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
95 return rcStrict;
96 }
97
98 /*
99 * Perform the check.
100 */
101 uint16_t fPortMask = (1 << cbOperand) - 1;
102 bmBytes >>= (u16Port & 7);
103 if (bmBytes & fPortMask)
104 {
105 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
106 u16Port, cbOperand, bmBytes, fPortMask));
107 return iemRaiseGeneralProtectionFault0(pVCpu);
108 }
109
110 return VINF_SUCCESS;
111}
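
/*
 * Illustrative sketch (not part of the original source): the byte/bit arithmetic
 * used above, restated against a flat byte array without the TSS limit and paging
 * concerns. The helper name is hypothetical; it assumes the architectural trailing
 * 0xff byte so the two-byte read cannot run past the end of the bitmap.
 */
#if 0 /* example only, compiled out */
static bool exampleIsIoPortAccessAllowed(uint8_t const *pabIoBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    /* Two bytes are read so a bit range crossing a byte boundary is covered. */
    uint16_t u16Bits = pabIoBitmap[u16Port / 8] | ((uint16_t)pabIoBitmap[u16Port / 8 + 1] << 8);
    uint16_t fMask   = (uint16_t)((1 << cbOperand) - 1);   /* cbOperand consecutive bits */
    return !((u16Bits >> (u16Port & 7)) & fMask);           /* any set bit denies access */
}
#endif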
112
113
114/**
115 * Checks if we are allowed to access the given I/O port, raising the
116 * appropriate exceptions if we aren't (or if the I/O bitmap is not
117 * accessible).
118 *
119 * @returns Strict VBox status code.
120 *
121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
122 * @param pCtx The register context.
123 * @param u16Port The port number.
124 * @param cbOperand The operand size.
125 */
126DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
127{
128 X86EFLAGS Efl;
129 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
130 if ( (pCtx->cr0 & X86_CR0_PE)
131 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
132 || Efl.Bits.u1VM) )
133 return iemHlpCheckPortIOPermissionBitmap(pVCpu, pCtx, u16Port, cbOperand);
134 return VINF_SUCCESS;
135}
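
/*
 * Illustrative sketch (not part of the original source): the guard above decides
 * whether the TSS I/O bitmap is consulted at all. Restated as a standalone predicate
 * with a hypothetical name, using only macros already employed in this file.
 */
#if 0 /* example only, compiled out */
static bool exampleMustCheckIoBitmap(uint64_t uCr0, uint32_t fEfl, uint8_t uCpl)
{
    return (uCr0 & X86_CR0_PE)
        && (   uCpl > X86_EFL_GET_IOPL(fEfl) /* less privileged than IOPL requires */
            || (fEfl & X86_EFL_VM));         /* or virtual-8086 mode */
}
#endif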
136
137
138#if 0
139/**
140 * Calculates the parity bit.
141 *
142 * @returns true if the bit is set, false if not.
143 * @param u8Result The least significant byte of the result.
144 */
145static bool iemHlpCalcParityFlag(uint8_t u8Result)
146{
147 /*
148 * Parity is set if the number of set bits in the least significant byte
149 * of the result is even.
150 */
151 uint8_t cBits;
152 cBits = u8Result & 1; /* 0 */
153 u8Result >>= 1;
154 cBits += u8Result & 1;
155 u8Result >>= 1;
156 cBits += u8Result & 1;
157 u8Result >>= 1;
158 cBits += u8Result & 1;
159 u8Result >>= 1;
160 cBits += u8Result & 1; /* 4 */
161 u8Result >>= 1;
162 cBits += u8Result & 1;
163 u8Result >>= 1;
164 cBits += u8Result & 1;
165 u8Result >>= 1;
166 cBits += u8Result & 1;
167 return !(cBits & 1);
168}
169#endif /* not used */
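
/*
 * Illustrative sketch (not part of the original source): the disabled helper above
 * counts bits one at a time. An equivalent XOR-folding formulation is shown below;
 * this is only a sketch, the flag is otherwise produced by the iemAImpl_* helpers.
 */
#if 0 /* example only, compiled out */
static bool exampleCalcParityFlag(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;      /* fold the high nibble into the low one */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;      /* bit 0 now holds the XOR of all eight bits */
    return !(u8Result & 1);         /* even number of set bits -> PF set */
}
#endif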
170
171
172/**
173 * Updates the specified flags according to an 8-bit result.
174 *
175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
176 * @param u8Result The result to set the flags according to.
177 * @param fToUpdate The flags to update.
178 * @param fUndefined The flags that are specified as undefined.
179 */
180static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
181{
182 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
183
184 uint32_t fEFlags = pCtx->eflags.u;
185 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
186 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
187 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
188#ifdef IEM_VERIFICATION_MODE_FULL
189 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
190#endif
191}
192
193
194/**
195 * Updates the specified flags according to a 16-bit result.
196 *
197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
198 * @param u16Result The result to set the flags according to.
199 * @param fToUpdate The flags to update.
200 * @param fUndefined The flags that are specified as undefined.
201 */
202static void iemHlpUpdateArithEFlagsU16(PVMCPU pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
203{
204 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
205
206 uint32_t fEFlags = pCtx->eflags.u;
207 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
208 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
209 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
210#ifdef IEM_VERIFICATION_MODE_FULL
211 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;
212#endif
213}
214
215
216/**
217 * Helper used by iret.
218 *
219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
220 * @param uCpl The new CPL.
221 * @param pSReg Pointer to the segment register.
222 */
223static void iemHlpAdjustSelectorForNewCpl(PVMCPU pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
224{
225#ifdef VBOX_WITH_RAW_MODE_NOT_R0
226 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
227 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
228#else
229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
230#endif
231
232 if ( uCpl > pSReg->Attr.n.u2Dpl
233 && pSReg->Attr.n.u1DescType /* code or data, not system */
234 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
235 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
236 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
237}
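
/*
 * Illustrative sketch (not part of the original source): the condition above is the
 * IRET rule that a data or non-conforming code segment whose DPL is below the new
 * CPL gets unloaded. Restated as a standalone predicate with hypothetical parameters.
 */
#if 0 /* example only, compiled out */
static bool exampleMustNullSelectorOnIret(uint8_t uNewCpl, uint8_t uDpl, bool fCodeOrData, uint8_t u4Type)
{
    return uNewCpl > uDpl
        && fCodeOrData /* code or data, not system */
        &&    (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
           != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF); /* not conforming code */
}
#endif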
238
239
240/**
241 * Indicates that we have modified the FPU state.
242 *
243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
244 */
245DECLINLINE(void) iemHlpUsedFpu(PVMCPU pVCpu)
246{
247 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
248}
249
250/** @} */
251
252/** @name C Implementations
253 * @{
254 */
255
256/**
257 * Implements a 16-bit popa.
258 */
259IEM_CIMPL_DEF_0(iemCImpl_popa_16)
260{
261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
262 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
263 RTGCPTR GCPtrLast = GCPtrStart + 15;
264 VBOXSTRICTRC rcStrict;
265
266 /*
267 * The docs are a bit hard to comprehend here, but it looks like we wrap
268 * around in real mode as long as none of the individual "popa" crosses the
269 * end of the stack segment. In protected mode we check the whole access
270 * in one go. For efficiency, only do the word-by-word thing if we're in
271 * danger of wrapping around.
272 */
273 /** @todo do popa boundary / wrap-around checks. */
274 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
275 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
276 {
277 /* word-by-word */
278 RTUINT64U TmpRsp;
279 TmpRsp.u = pCtx->rsp;
280 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->di, &TmpRsp);
281 if (rcStrict == VINF_SUCCESS)
282 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->si, &TmpRsp);
283 if (rcStrict == VINF_SUCCESS)
284 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bp, &TmpRsp);
285 if (rcStrict == VINF_SUCCESS)
286 {
287 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
288 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bx, &TmpRsp);
289 }
290 if (rcStrict == VINF_SUCCESS)
291 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->dx, &TmpRsp);
292 if (rcStrict == VINF_SUCCESS)
293 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->cx, &TmpRsp);
294 if (rcStrict == VINF_SUCCESS)
295 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->ax, &TmpRsp);
296 if (rcStrict == VINF_SUCCESS)
297 {
298 pCtx->rsp = TmpRsp.u;
299 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
300 }
301 }
302 else
303 {
304 uint16_t const *pa16Mem = NULL;
305 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
306 if (rcStrict == VINF_SUCCESS)
307 {
308 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
309 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
310 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
311 /* skip sp */
312 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
313 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
314 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
315 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
316 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
317 if (rcStrict == VINF_SUCCESS)
318 {
319 iemRegAddToRsp(pVCpu, pCtx, 16);
320 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
321 }
322 }
323 }
324 return rcStrict;
325}
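
/*
 * Illustrative sketch (not part of the original source): the 7 - X86_GREG_xXX
 * indexing in the mapped fast path works because PUSHA stores AX at the highest
 * address and DI at the lowest. A hypothetical helper spelling out the order of
 * the eight words as they sit in memory, lowest stack address first:
 */
#if 0 /* example only, compiled out */
static void examplePopa16FrameLayout(uint16_t const pa16Frame[8],
                                     uint16_t *pDi, uint16_t *pSi, uint16_t *pBp,
                                     uint16_t *pBx, uint16_t *pDx, uint16_t *pCx, uint16_t *pAx)
{
    *pDi = pa16Frame[0];            /* 7 - X86_GREG_xDI */
    *pSi = pa16Frame[1];            /* 7 - X86_GREG_xSI */
    *pBp = pa16Frame[2];            /* 7 - X86_GREG_xBP */
    /* pa16Frame[3] holds the saved SP and is skipped. */
    *pBx = pa16Frame[4];            /* 7 - X86_GREG_xBX */
    *pDx = pa16Frame[5];
    *pCx = pa16Frame[6];
    *pAx = pa16Frame[7];            /* 7 - X86_GREG_xAX, highest address, pushed first */
}
#endif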
326
327
328/**
329 * Implements a 32-bit popa.
330 */
331IEM_CIMPL_DEF_0(iemCImpl_popa_32)
332{
333 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
334 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx);
335 RTGCPTR GCPtrLast = GCPtrStart + 31;
336 VBOXSTRICTRC rcStrict;
337
338 /*
339 * The docs are a bit hard to comprehend here, but it looks like we wrap
340 * around in real mode as long as none of the individual "popa" crosses the
341 * end of the stack segment. In protected mode we check the whole access
342 * in one go. For efficiency, only do the word-by-word thing if we're in
343 * danger of wrapping around.
344 */
345 /** @todo do popa boundary / wrap-around checks. */
346 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
347 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
348 {
349 /* word-by-word */
350 RTUINT64U TmpRsp;
351 TmpRsp.u = pCtx->rsp;
352 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edi, &TmpRsp);
353 if (rcStrict == VINF_SUCCESS)
354 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->esi, &TmpRsp);
355 if (rcStrict == VINF_SUCCESS)
356 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebp, &TmpRsp);
357 if (rcStrict == VINF_SUCCESS)
358 {
359 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */
360 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebx, &TmpRsp);
361 }
362 if (rcStrict == VINF_SUCCESS)
363 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edx, &TmpRsp);
364 if (rcStrict == VINF_SUCCESS)
365 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ecx, &TmpRsp);
366 if (rcStrict == VINF_SUCCESS)
367 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->eax, &TmpRsp);
368 if (rcStrict == VINF_SUCCESS)
369 {
370#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
371 pCtx->rdi &= UINT32_MAX;
372 pCtx->rsi &= UINT32_MAX;
373 pCtx->rbp &= UINT32_MAX;
374 pCtx->rbx &= UINT32_MAX;
375 pCtx->rdx &= UINT32_MAX;
376 pCtx->rcx &= UINT32_MAX;
377 pCtx->rax &= UINT32_MAX;
378#endif
379 pCtx->rsp = TmpRsp.u;
380 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
381 }
382 }
383 else
384 {
385 uint32_t const *pa32Mem;
386 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
387 if (rcStrict == VINF_SUCCESS)
388 {
389 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
390 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
391 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
392 /* skip esp */
393 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
394 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
395 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
396 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
397 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
398 if (rcStrict == VINF_SUCCESS)
399 {
400 iemRegAddToRsp(pVCpu, pCtx, 32);
401 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
402 }
403 }
404 }
405 return rcStrict;
406}
407
408
409/**
410 * Implements a 16-bit pusha.
411 */
412IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
413{
414 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
415 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
416 RTGCPTR GCPtrBottom = GCPtrTop - 15;
417 VBOXSTRICTRC rcStrict;
418
419 /*
420 * The docs are a bit hard to comprehend here, but it looks like we wrap
421 * around in real mode as long as none of the individual "pusha" crosses the
422 * end of the stack segment. In protected mode we check the whole access
423 * in one go. For efficiency, only do the word-by-word thing if we're in
424 * danger of wrapping around.
425 */
426 /** @todo do pusha boundary / wrap-around checks. */
427 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
428 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
429 {
430 /* word-by-word */
431 RTUINT64U TmpRsp;
432 TmpRsp.u = pCtx->rsp;
433 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->ax, &TmpRsp);
434 if (rcStrict == VINF_SUCCESS)
435 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->cx, &TmpRsp);
436 if (rcStrict == VINF_SUCCESS)
437 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->dx, &TmpRsp);
438 if (rcStrict == VINF_SUCCESS)
439 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bx, &TmpRsp);
440 if (rcStrict == VINF_SUCCESS)
441 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->sp, &TmpRsp);
442 if (rcStrict == VINF_SUCCESS)
443 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bp, &TmpRsp);
444 if (rcStrict == VINF_SUCCESS)
445 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->si, &TmpRsp);
446 if (rcStrict == VINF_SUCCESS)
447 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->di, &TmpRsp);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 pCtx->rsp = TmpRsp.u;
451 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
452 }
453 }
454 else
455 {
456 GCPtrBottom--;
457 uint16_t *pa16Mem = NULL;
458 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
459 if (rcStrict == VINF_SUCCESS)
460 {
461 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
462 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
463 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
464 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
465 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
466 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
467 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
468 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
469 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
470 if (rcStrict == VINF_SUCCESS)
471 {
472 iemRegSubFromRsp(pVCpu, pCtx, 16);
473 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
474 }
475 }
476 }
477 return rcStrict;
478}
479
480
481/**
482 * Implements a 32-bit pusha.
483 */
484IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
485{
486 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
487 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx);
488 RTGCPTR GCPtrBottom = GCPtrTop - 31;
489 VBOXSTRICTRC rcStrict;
490
491 /*
492 * The docs are a bit hard to comprehend here, but it looks like we wrap
493 * around in real mode as long as none of the individual "pusha" crosses the
494 * end of the stack segment. In protected mode we check the whole access
495 * in one go. For efficiency, only do the word-by-word thing if we're in
496 * danger of wrapping around.
497 */
498 /** @todo do pusha boundary / wrap-around checks. */
499 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
500 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
501 {
502 /* word-by-word */
503 RTUINT64U TmpRsp;
504 TmpRsp.u = pCtx->rsp;
505 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->eax, &TmpRsp);
506 if (rcStrict == VINF_SUCCESS)
507 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ecx, &TmpRsp);
508 if (rcStrict == VINF_SUCCESS)
509 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edx, &TmpRsp);
510 if (rcStrict == VINF_SUCCESS)
511 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebx, &TmpRsp);
512 if (rcStrict == VINF_SUCCESS)
513 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esp, &TmpRsp);
514 if (rcStrict == VINF_SUCCESS)
515 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebp, &TmpRsp);
516 if (rcStrict == VINF_SUCCESS)
517 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esi, &TmpRsp);
518 if (rcStrict == VINF_SUCCESS)
519 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edi, &TmpRsp);
520 if (rcStrict == VINF_SUCCESS)
521 {
522 pCtx->rsp = TmpRsp.u;
523 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
524 }
525 }
526 else
527 {
528 GCPtrBottom--;
529 uint32_t *pa32Mem;
530 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
531 if (rcStrict == VINF_SUCCESS)
532 {
533 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
534 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
535 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
536 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
537 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
538 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
539 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
540 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
541 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
542 if (rcStrict == VINF_SUCCESS)
543 {
544 iemRegSubFromRsp(pVCpu, pCtx, 32);
545 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
546 }
547 }
548 }
549 return rcStrict;
550}
551
552
553/**
554 * Implements pushf.
555 *
556 *
557 * @param enmEffOpSize The effective operand size.
558 */
559IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
560{
561 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
562 VBOXSTRICTRC rcStrict;
563
564 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
565 {
566 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
567 IEM_SVM_UPDATE_NRIP(pVCpu);
568 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
569 }
570
571 /*
572 * If we're in V8086 mode some care is required (which is why we're
573 * doing this in a C implementation).
574 */
575 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
576 if ( (fEfl & X86_EFL_VM)
577 && X86_EFL_GET_IOPL(fEfl) != 3 )
578 {
579 Assert(pCtx->cr0 & X86_CR0_PE);
580 if ( enmEffOpSize != IEMMODE_16BIT
581 || !(pCtx->cr4 & X86_CR4_VME))
582 return iemRaiseGeneralProtectionFault0(pVCpu);
583 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
584 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
585 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
586 }
587 else
588 {
589
590 /*
591 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
592 */
593 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
594
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
599 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
600 fEfl |= UINT16_C(0xf000);
601 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
602 break;
603 case IEMMODE_32BIT:
604 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
605 break;
606 case IEMMODE_64BIT:
607 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
608 break;
609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
610 }
611 }
612 if (rcStrict != VINF_SUCCESS)
613 return rcStrict;
614
615 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
616 return VINF_SUCCESS;
617}
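
/*
 * Illustrative sketch (not part of the original source): in the VME path above, the
 * shift by (19 - 9) copies VIF (bit 19) into the IF position (bit 9) of the 16-bit
 * image that gets pushed. A hypothetical helper building just that image:
 */
#if 0 /* example only, compiled out */
static uint16_t examplePushfVmeImage(uint32_t fEfl)
{
    fEfl &= ~X86_EFL_IF;                        /* the real IF is hidden from the guest...  */
    fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);   /* ...and replaced by VIF (bit 19 -> bit 9) */
    return (uint16_t)fEfl;                      /* RF and VM live above bit 15 and drop out */
}
#endif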
618
619
620/**
621 * Implements popf.
622 *
623 * @param enmEffOpSize The effective operand size.
624 */
625IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
626{
627 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
628 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu, pCtx);
629 VBOXSTRICTRC rcStrict;
630 uint32_t fEflNew;
631
632 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
633 {
634 Log2(("popf: Guest intercept -> #VMEXIT\n"));
635 IEM_SVM_UPDATE_NRIP(pVCpu);
636 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
637 }
638
639 /*
640 * V8086 is special as usual.
641 */
642 if (fEflOld & X86_EFL_VM)
643 {
644 /*
645 * Almost anything goes if IOPL is 3.
646 */
647 if (X86_EFL_GET_IOPL(fEflOld) == 3)
648 {
649 switch (enmEffOpSize)
650 {
651 case IEMMODE_16BIT:
652 {
653 uint16_t u16Value;
654 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
655 if (rcStrict != VINF_SUCCESS)
656 return rcStrict;
657 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
658 break;
659 }
660 case IEMMODE_32BIT:
661 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
662 if (rcStrict != VINF_SUCCESS)
663 return rcStrict;
664 break;
665 IEM_NOT_REACHED_DEFAULT_CASE_RET();
666 }
667
668 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
669 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
670 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
671 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
672 }
673 /*
674 * Interrupt flag virtualization with CR4.VME=1.
675 */
676 else if ( enmEffOpSize == IEMMODE_16BIT
677 && (pCtx->cr4 & X86_CR4_VME) )
678 {
679 uint16_t u16Value;
680 RTUINT64U TmpRsp;
681 TmpRsp.u = pCtx->rsp;
682 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
683 if (rcStrict != VINF_SUCCESS)
684 return rcStrict;
685
686 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
687 * or before? */
688 if ( ( (u16Value & X86_EFL_IF)
689 && (fEflOld & X86_EFL_VIP))
690 || (u16Value & X86_EFL_TF) )
691 return iemRaiseGeneralProtectionFault0(pVCpu);
692
693 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
694 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
695 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
696 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
697
698 pCtx->rsp = TmpRsp.u;
699 }
700 else
701 return iemRaiseGeneralProtectionFault0(pVCpu);
702
703 }
704 /*
705 * Not in V8086 mode.
706 */
707 else
708 {
709 /* Pop the flags. */
710 switch (enmEffOpSize)
711 {
712 case IEMMODE_16BIT:
713 {
714 uint16_t u16Value;
715 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
716 if (rcStrict != VINF_SUCCESS)
717 return rcStrict;
718 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
719
720 /*
721 * Ancient CPU adjustments:
722 * - 8086, 80186, V20/30:
723 * Fixed bits 15:12 are not kept correctly internally, mostly for
724 * practical reasons (masking below). We add them when pushing flags.
725 * - 80286:
726 * The NT and IOPL flags cannot be popped from real mode and are
727 * therefore always zero (since a 286 can never exit from PM and
728 * their initial value is zero). This changed on a 386 and can
729 * therefore be used to detect 286 or 386 CPU in real mode.
730 */
731 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
732 && !(pCtx->cr0 & X86_CR0_PE) )
733 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
734 break;
735 }
736 case IEMMODE_32BIT:
737 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
738 if (rcStrict != VINF_SUCCESS)
739 return rcStrict;
740 break;
741 case IEMMODE_64BIT:
742 {
743 uint64_t u64Value;
744 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
745 if (rcStrict != VINF_SUCCESS)
746 return rcStrict;
747 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
748 break;
749 }
750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
751 }
752
753 /* Merge them with the current flags. */
754 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
755 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
756 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
757 || pVCpu->iem.s.uCpl == 0)
758 {
759 fEflNew &= fPopfBits;
760 fEflNew |= ~fPopfBits & fEflOld;
761 }
762 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
763 {
764 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
765 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
766 }
767 else
768 {
769 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
770 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
771 }
772 }
773
774 /*
775 * Commit the flags.
776 */
777 Assert(fEflNew & RT_BIT_32(1));
778 IEMMISC_SET_EFL(pVCpu, pCtx, fEflNew);
779 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
780
781 return VINF_SUCCESS;
782}
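
/*
 * Illustrative sketch (not part of the original source): the three merge branches
 * above encode which bits POPF may change outside V8086 mode. The sketch below only
 * keys off CPL/IOPL; the original additionally takes the first branch whenever the
 * popped value would leave IOPL and IF unchanged. Hypothetical helper name.
 */
#if 0 /* example only, compiled out */
static uint32_t examplePopfWritableMask(uint32_t fPopfBits, uint8_t uCpl, uint32_t fEflOld)
{
    if (uCpl == 0)
        return fPopfBits;                               /* may change IOPL and IF too */
    if (uCpl <= X86_EFL_GET_IOPL(fEflOld))
        return fPopfBits & ~X86_EFL_IOPL;               /* IF yes, IOPL no */
    return fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);    /* neither IOPL nor IF */
}
#endif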
783
784
785/**
786 * Implements a 16-bit indirect call.
787 *
788 * @param uNewPC The new program counter (RIP) value (loaded from the
789 * operand).
791 */
792IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
793{
794 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
795 uint16_t uOldPC = pCtx->ip + cbInstr;
796 if (uNewPC > pCtx->cs.u32Limit)
797 return iemRaiseGeneralProtectionFault0(pVCpu);
798
799 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
800 if (rcStrict != VINF_SUCCESS)
801 return rcStrict;
802
803 pCtx->rip = uNewPC;
804 pCtx->eflags.Bits.u1RF = 0;
805
806#ifndef IEM_WITH_CODE_TLB
807 /* Flush the prefetch buffer. */
808 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
809#endif
810 return VINF_SUCCESS;
811}
812
813
814/**
815 * Implements a 16-bit relative call.
816 *
817 * @param offDisp The displacement offset.
818 */
819IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
820{
821 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
822 uint16_t uOldPC = pCtx->ip + cbInstr;
823 uint16_t uNewPC = uOldPC + offDisp;
824 if (uNewPC > pCtx->cs.u32Limit)
825 return iemRaiseGeneralProtectionFault0(pVCpu);
826
827 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
828 if (rcStrict != VINF_SUCCESS)
829 return rcStrict;
830
831 pCtx->rip = uNewPC;
832 pCtx->eflags.Bits.u1RF = 0;
833
834#ifndef IEM_WITH_CODE_TLB
835 /* Flush the prefetch buffer. */
836 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
837#endif
838 return VINF_SUCCESS;
839}
840
841
842/**
843 * Implements a 32-bit indirect call.
844 *
845 * @param uNewPC The new program counter (RIP) value (loaded from the
846 * operand).
848 */
849IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
850{
851 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
852 uint32_t uOldPC = pCtx->eip + cbInstr;
853 if (uNewPC > pCtx->cs.u32Limit)
854 return iemRaiseGeneralProtectionFault0(pVCpu);
855
856 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
857 if (rcStrict != VINF_SUCCESS)
858 return rcStrict;
859
860#if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE) && defined(VBOX_WITH_CALL_RECORD)
861 /*
862 * CSAM hook for recording interesting indirect calls.
863 */
864 if ( !pCtx->eflags.Bits.u1IF
865 && (pCtx->cr0 & X86_CR0_PG)
866 && !CSAMIsEnabled(pVCpu->CTX_SUFF(pVM))
867 && pVCpu->iem.s.uCpl == 0)
868 {
869 EMSTATE enmState = EMGetState(pVCpu);
870 if ( enmState == EMSTATE_IEM_THEN_REM
871 || enmState == EMSTATE_IEM
872 || enmState == EMSTATE_REM)
873 CSAMR3RecordCallAddress(pVCpu->CTX_SUFF(pVM), pCtx->eip);
874 }
875#endif
876
877 pCtx->rip = uNewPC;
878 pCtx->eflags.Bits.u1RF = 0;
879
880#ifndef IEM_WITH_CODE_TLB
881 /* Flush the prefetch buffer. */
882 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
883#endif
884 return VINF_SUCCESS;
885}
886
887
888/**
889 * Implements a 32-bit relative call.
890 *
891 * @param offDisp The displacement offset.
892 */
893IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
894{
895 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
896 uint32_t uOldPC = pCtx->eip + cbInstr;
897 uint32_t uNewPC = uOldPC + offDisp;
898 if (uNewPC > pCtx->cs.u32Limit)
899 return iemRaiseGeneralProtectionFault0(pVCpu);
900
901 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
902 if (rcStrict != VINF_SUCCESS)
903 return rcStrict;
904
905 pCtx->rip = uNewPC;
906 pCtx->eflags.Bits.u1RF = 0;
907
908#ifndef IEM_WITH_CODE_TLB
909 /* Flush the prefetch buffer. */
910 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
911#endif
912 return VINF_SUCCESS;
913}
914
915
916/**
917 * Implements a 64-bit indirect call.
918 *
919 * @param uNewPC The new program counter (RIP) value (loaded from the
920 * operand).
922 */
923IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
924{
925 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
926 uint64_t uOldPC = pCtx->rip + cbInstr;
927 if (!IEM_IS_CANONICAL(uNewPC))
928 return iemRaiseGeneralProtectionFault0(pVCpu);
929
930 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
931 if (rcStrict != VINF_SUCCESS)
932 return rcStrict;
933
934 pCtx->rip = uNewPC;
935 pCtx->eflags.Bits.u1RF = 0;
936
937#ifndef IEM_WITH_CODE_TLB
938 /* Flush the prefetch buffer. */
939 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
940#endif
941 return VINF_SUCCESS;
942}
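
/*
 * Illustrative sketch (not part of the original source): the canonical check used by
 * the 64-bit call variants boils down to bits 63:47 being a sign extension of bit 47
 * (assuming 48-bit linear addresses). Hypothetical helper name.
 */
#if 0 /* example only, compiled out */
static bool exampleIsCanonical(uint64_t uAddr)
{
    uint64_t const uTop = uAddr >> 47;                  /* bits 63:47 as a 17-bit value */
    return uTop == 0 || uTop == UINT64_C(0x1ffff);      /* all clear or all set */
}
#endif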
943
944
945/**
946 * Implements a 64-bit relative call.
947 *
948 * @param offDisp The displacement offset.
949 */
950IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
951{
952 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
953 uint64_t uOldPC = pCtx->rip + cbInstr;
954 uint64_t uNewPC = uOldPC + offDisp;
955 if (!IEM_IS_CANONICAL(uNewPC))
956 return iemRaiseNotCanonical(pVCpu);
957
958 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
959 if (rcStrict != VINF_SUCCESS)
960 return rcStrict;
961
962 pCtx->rip = uNewPC;
963 pCtx->eflags.Bits.u1RF = 0;
964
965#ifndef IEM_WITH_CODE_TLB
966 /* Flush the prefetch buffer. */
967 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
968#endif
969
970 return VINF_SUCCESS;
971}
972
973
974/**
975 * Implements far jumps and calls thru task segments (TSS).
976 *
977 * @param uSel The selector.
978 * @param enmBranch The kind of branching we're performing.
979 * @param enmEffOpSize The effective operand size.
980 * @param pDesc The descriptor corresponding to @a uSel. The type is
981 * task segment.
982 */
983IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
984{
985#ifndef IEM_IMPLEMENTS_TASKSWITCH
986 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
987#else
988 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
989 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
990 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
991 RT_NOREF_PV(enmEffOpSize);
992
993 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
994 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
995 {
996 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
997 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
998 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
999 }
1000
1001 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1002 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1003 * checked here, need testcases. */
1004 if (!pDesc->Legacy.Gen.u1Present)
1005 {
1006 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1007 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1008 }
1009
1010 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1011 uint32_t uNextEip = pCtx->eip + cbInstr;
1012 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1013 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1014#endif
1015}
1016
1017
1018/**
1019 * Implements far jumps and calls thru task gates.
1020 *
1021 * @param uSel The selector.
1022 * @param enmBranch The kind of branching we're performing.
1023 * @param enmEffOpSize The effective operand size.
1024 * @param pDesc The descriptor corresponding to @a uSel. The type is
1025 * task gate.
1026 */
1027IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1028{
1029#ifndef IEM_IMPLEMENTS_TASKSWITCH
1030 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1031#else
1032 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1033 RT_NOREF_PV(enmEffOpSize);
1034
1035 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1036 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1037 {
1038 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1039 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1040 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1041 }
1042
1043 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1044 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1045 * checked here, need testcases. */
1046 if (!pDesc->Legacy.Gen.u1Present)
1047 {
1048 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1049 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1050 }
1051
1052 /*
1053 * Fetch the new TSS descriptor from the GDT.
1054 */
1055 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1056 if (uSelTss & X86_SEL_LDT)
1057 {
1058 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1059 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1060 }
1061
1062 IEMSELDESC TssDesc;
1063 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1064 if (rcStrict != VINF_SUCCESS)
1065 return rcStrict;
1066
1067 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1068 {
1069 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1070 TssDesc.Legacy.Gate.u4Type));
1071 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1072 }
1073
1074 if (!TssDesc.Legacy.Gate.u1Present)
1075 {
1076 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1077 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1078 }
1079
1080 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1081 uint32_t uNextEip = pCtx->eip + cbInstr;
1082 return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1083 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1084#endif
1085}
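
/*
 * Illustrative sketch (not part of the original source): the two task branches above
 * and the call-gate branch below all start with the same privilege rule - the gate
 * DPL must be numerically no lower than both CPL and the selector's RPL, otherwise
 * #GP(sel) is raised. Hypothetical helper name.
 */
#if 0 /* example only, compiled out */
static bool exampleGateDplCheckOk(uint8_t uGateDpl, uint8_t uCpl, uint16_t uSel)
{
    return uGateDpl >= uCpl
        && uGateDpl >= (uSel & X86_SEL_RPL);
}
#endif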
1086
1087
1088/**
1089 * Implements far jumps and calls thru call gates.
1090 *
1091 * @param uSel The selector.
1092 * @param enmBranch The kind of branching we're performing.
1093 * @param enmEffOpSize The effective operand size.
1094 * @param pDesc The descriptor corresponding to @a uSel. The type is
1095 * call gate.
1096 */
1097IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1098{
1099#define IEM_IMPLEMENTS_CALLGATE
1100#ifndef IEM_IMPLEMENTS_CALLGATE
1101 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1102#else
1103 RT_NOREF_PV(enmEffOpSize);
1104
1105 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1106 * inter-privilege calls and are much more complex.
1107 *
1108 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1109 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1110 * must be 16-bit or 32-bit.
1111 */
1112 /** @todo: effective operand size is probably irrelevant here, only the
1113 * call gate bitness matters??
1114 */
1115 VBOXSTRICTRC rcStrict;
1116 RTPTRUNION uPtrRet;
1117 uint64_t uNewRsp;
1118 uint64_t uNewRip;
1119 uint64_t u64Base;
1120 uint32_t cbLimit;
1121 RTSEL uNewCS;
1122 IEMSELDESC DescCS;
1123
1124 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1125 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1126 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1127 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1128
1129 /* Determine the new instruction pointer from the gate descriptor. */
1130 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1131 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1132 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1133
1134 /* Perform DPL checks on the gate descriptor. */
1135 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1136 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1137 {
1138 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1139 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1140 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1141 }
1142
1143 /** @todo does this catch NULL selectors, too? */
1144 if (!pDesc->Legacy.Gen.u1Present)
1145 {
1146 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1147 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1148 }
1149
1150 /*
1151 * Fetch the target CS descriptor from the GDT or LDT.
1152 */
1153 uNewCS = pDesc->Legacy.Gate.u16Sel;
1154 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1155 if (rcStrict != VINF_SUCCESS)
1156 return rcStrict;
1157
1158 /* Target CS must be a code selector. */
1159 if ( !DescCS.Legacy.Gen.u1DescType
1160 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1161 {
1162 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1163 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1164 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1165 }
1166
1167 /* Privilege checks on target CS. */
1168 if (enmBranch == IEMBRANCH_JUMP)
1169 {
1170 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1171 {
1172 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1173 {
1174 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1175 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1176 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1177 }
1178 }
1179 else
1180 {
1181 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1182 {
1183 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1184 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1185 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1186 }
1187 }
1188 }
1189 else
1190 {
1191 Assert(enmBranch == IEMBRANCH_CALL);
1192 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1193 {
1194 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1195 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1196 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1197 }
1198 }
1199
1200 /* Additional long mode checks. */
1201 if (IEM_IS_LONG_MODE(pVCpu))
1202 {
1203 if (!DescCS.Legacy.Gen.u1Long)
1204 {
1205 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1206 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1207 }
1208
1209 /* L vs D. */
1210 if ( DescCS.Legacy.Gen.u1Long
1211 && DescCS.Legacy.Gen.u1DefBig)
1212 {
1213 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1214 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1215 }
1216 }
1217
1218 if (!DescCS.Legacy.Gate.u1Present)
1219 {
1220 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1221 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1222 }
1223
1224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1225
1226 if (enmBranch == IEMBRANCH_JUMP)
1227 {
1228 /** @todo: This is very similar to regular far jumps; merge! */
1229 /* Jumps are fairly simple... */
1230
1231 /* Chop the high bits off if 16-bit gate (Intel says so). */
1232 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1233 uNewRip = (uint16_t)uNewRip;
1234
1235 /* Limit check for non-long segments. */
1236 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1237 if (DescCS.Legacy.Gen.u1Long)
1238 u64Base = 0;
1239 else
1240 {
1241 if (uNewRip > cbLimit)
1242 {
1243 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1244 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1245 }
1246 u64Base = X86DESC_BASE(&DescCS.Legacy);
1247 }
1248
1249 /* Canonical address check. */
1250 if (!IEM_IS_CANONICAL(uNewRip))
1251 {
1252 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1253 return iemRaiseNotCanonical(pVCpu);
1254 }
1255
1256 /*
1257 * Ok, everything checked out fine. Now set the accessed bit before
1258 * committing the result into CS, CSHID and RIP.
1259 */
1260 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1261 {
1262 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1263 if (rcStrict != VINF_SUCCESS)
1264 return rcStrict;
1265 /** @todo check what VT-x and AMD-V does. */
1266 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1267 }
1268
1269 /* commit */
1270 pCtx->rip = uNewRip;
1271 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1272 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1273 pCtx->cs.ValidSel = pCtx->cs.Sel;
1274 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1275 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1276 pCtx->cs.u32Limit = cbLimit;
1277 pCtx->cs.u64Base = u64Base;
1278 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1279 }
1280 else
1281 {
1282 Assert(enmBranch == IEMBRANCH_CALL);
1283 /* Calls are much more complicated. */
1284
1285 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1286 {
1287 uint16_t offNewStack; /* Offset of new stack in TSS. */
1288 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1289 uint8_t uNewCSDpl;
1290 uint8_t cbWords;
1291 RTSEL uNewSS;
1292 RTSEL uOldSS;
1293 uint64_t uOldRsp;
1294 IEMSELDESC DescSS;
1295 RTPTRUNION uPtrTSS;
1296 RTGCPTR GCPtrTSS;
1297 RTPTRUNION uPtrParmWds;
1298 RTGCPTR GCPtrParmWds;
1299
1300 /* More privilege. This is the fun part. */
1301 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1302
1303 /*
1304 * Determine new SS:rSP from the TSS.
1305 */
1306 Assert(!pCtx->tr.Attr.n.u1DescType);
1307
1308 /* Figure out where the new stack pointer is stored in the TSS. */
1309 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1310 if (!IEM_IS_LONG_MODE(pVCpu))
1311 {
1312 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1313 {
1314 offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1315 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1316 }
1317 else
1318 {
1319 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1320 offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1321 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1322 }
1323 }
1324 else
1325 {
1326 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1327 offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1328 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1329 }
1330
1331 /* Check against TSS limit. */
1332 if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
1333 {
1334 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
1335 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pCtx->tr.Sel);
1336 }
1337
1338 GCPtrTSS = pCtx->tr.u64Base + offNewStack;
1339 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1340 if (rcStrict != VINF_SUCCESS)
1341 {
1342 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1343 return rcStrict;
1344 }
1345
1346 if (!IEM_IS_LONG_MODE(pVCpu))
1347 {
1348 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1349 {
1350 uNewRsp = uPtrTSS.pu32[0];
1351 uNewSS = uPtrTSS.pu16[2];
1352 }
1353 else
1354 {
1355 Assert(pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1356 uNewRsp = uPtrTSS.pu16[0];
1357 uNewSS = uPtrTSS.pu16[1];
1358 }
1359 }
1360 else
1361 {
1362 Assert(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1363 /* SS will be a NULL selector, but that's valid. */
1364 uNewRsp = uPtrTSS.pu64[0];
1365 uNewSS = uNewCSDpl;
1366 }
1367
1368 /* Done with the TSS now. */
1369 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1370 if (rcStrict != VINF_SUCCESS)
1371 {
1372 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1373 return rcStrict;
1374 }
1375
1376 /* Only used outside of long mode. */
1377 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1378
1379 /* If EFER.LMA is 0, there's extra work to do. */
1380 if (!IEM_IS_LONG_MODE(pVCpu))
1381 {
1382 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1383 {
1384 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1385 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1386 }
1387
1388 /* Grab the new SS descriptor. */
1389 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1390 if (rcStrict != VINF_SUCCESS)
1391 return rcStrict;
1392
1393 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1394 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1395 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1396 {
1397 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1398 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1399 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1400 }
1401
1402 /* Ensure new SS is a writable data segment. */
1403 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1404 {
1405 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1406 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1407 }
1408
1409 if (!DescSS.Legacy.Gen.u1Present)
1410 {
1411 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1412 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1413 }
1414 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1415 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1416 else
1417 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1418 }
1419 else
1420 {
1421 /* Just grab the new (NULL) SS descriptor. */
1422 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1423 * like we do... */
1424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1425 if (rcStrict != VINF_SUCCESS)
1426 return rcStrict;
1427
1428 cbNewStack = sizeof(uint64_t) * 4;
1429 }
1430
1431 /** @todo: According to Intel, new stack is checked for enough space first,
1432 * then switched. According to AMD, the stack is switched first and
1433 * then pushes might fault!
1434 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1435 * incoming stack #PF happens before actual stack switch. AMD is
1436 * either lying or implicitly assumes that new state is committed
1437 * only if and when an instruction doesn't fault.
1438 */
1439
1440 /** @todo: According to AMD, CS is loaded first, then SS.
1441 * According to Intel, it's the other way around!?
1442 */
1443
1444 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1445
1446 /* Set the accessed bit before committing new SS. */
1447 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1448 {
1449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1450 if (rcStrict != VINF_SUCCESS)
1451 return rcStrict;
1452 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1453 }
1454
1455 /* Remember the old SS:rSP and their linear address. */
1456 uOldSS = pCtx->ss.Sel;
1457 uOldRsp = pCtx->ss.Attr.n.u1DefBig ? pCtx->rsp : pCtx->sp;
1458
1459 GCPtrParmWds = pCtx->ss.u64Base + uOldRsp;
1460
1461 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1462 or #PF, the former is not implemented in this workaround. */
1463 /** @todo Proper fix callgate target stack exceptions. */
1464 /** @todo testcase: Cover callgates with partially or fully inaccessible
1465 * target stacks. */
1466 void *pvNewFrame;
1467 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1468 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW);
1469 if (rcStrict != VINF_SUCCESS)
1470 {
1471 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1472 return rcStrict;
1473 }
1474 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480
1481 /* Commit new SS:rSP. */
1482 pCtx->ss.Sel = uNewSS;
1483 pCtx->ss.ValidSel = uNewSS;
1484 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1485 pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1486 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1487 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1488 pCtx->rsp = uNewRsp;
1489 pVCpu->iem.s.uCpl = uNewCSDpl;
1490 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1491 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1492
1493 /* At this point the stack access must not fail because new state was already committed. */
1494 /** @todo this can still fail because SS.LIMIT is not checked. */
1495 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1496 &uPtrRet.pv, &uNewRsp);
1497 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1498 VERR_INTERNAL_ERROR_5);
1499
1500 if (!IEM_IS_LONG_MODE(pVCpu))
1501 {
1502 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1503 {
1504 /* Push the old CS:rIP. */
1505 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1506 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1507
1508 if (cbWords)
1509 {
1510 /* Map the relevant chunk of the old stack. */
1511 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1512 if (rcStrict != VINF_SUCCESS)
1513 {
1514 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1515 return rcStrict;
1516 }
1517
1518 /* Copy the parameter (d)words. */
1519 for (int i = 0; i < cbWords; ++i)
1520 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1521
1522 /* Unmap the old stack. */
1523 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1524 if (rcStrict != VINF_SUCCESS)
1525 {
1526 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1527 return rcStrict;
1528 }
1529 }
1530
1531 /* Push the old SS:rSP. */
1532 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1533 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1534 }
1535 else
1536 {
1537 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1538
1539 /* Push the old CS:rIP. */
1540 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1541 uPtrRet.pu16[1] = pCtx->cs.Sel;
1542
1543 if (cbWords)
1544 {
1545 /* Map the relevant chunk of the old stack. */
1546 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1547 if (rcStrict != VINF_SUCCESS)
1548 {
1549 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1550 return rcStrict;
1551 }
1552
1553 /* Copy the parameter words. */
1554 for (int i = 0; i < cbWords; ++i)
1555 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1556
1557 /* Unmap the old stack. */
1558 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1559 if (rcStrict != VINF_SUCCESS)
1560 {
1561 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1562 return rcStrict;
1563 }
1564 }
1565
1566 /* Push the old SS:rSP. */
1567 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1568 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1569 }
1570 }
1571 else
1572 {
1573 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1574
1575 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1576 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1577 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1578 uPtrRet.pu64[2] = uOldRsp;
1579 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1580 }
1581
1582 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1583 if (rcStrict != VINF_SUCCESS)
1584 {
1585 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1586 return rcStrict;
1587 }
1588
1589 /* Chop the high bits off if 16-bit gate (Intel says so). */
1590 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1591 uNewRip = (uint16_t)uNewRip;
1592
1593 /* Limit / canonical check. */
1594 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1595 if (!IEM_IS_LONG_MODE(pVCpu))
1596 {
1597 if (uNewRip > cbLimit)
1598 {
1599 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1600 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1601 }
1602 u64Base = X86DESC_BASE(&DescCS.Legacy);
1603 }
1604 else
1605 {
1606 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1607 if (!IEM_IS_CANONICAL(uNewRip))
1608 {
1609 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1610 return iemRaiseNotCanonical(pVCpu);
1611 }
1612 u64Base = 0;
1613 }
1614
1615 /*
1616 * Now set the accessed bit before
1617 * writing the return address to the stack and committing the result into
1618 * CS, CSHID and RIP.
1619 */
1620 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1621 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1622 {
1623 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1624 if (rcStrict != VINF_SUCCESS)
1625 return rcStrict;
1626 /** @todo check what VT-x and AMD-V does. */
1627 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1628 }
1629
1630 /* Commit new CS:rIP. */
1631 pCtx->rip = uNewRip;
1632 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1633 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1634 pCtx->cs.ValidSel = pCtx->cs.Sel;
1635 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1636 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1637 pCtx->cs.u32Limit = cbLimit;
1638 pCtx->cs.u64Base = u64Base;
1639 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1640 }
1641 else
1642 {
1643 /* Same privilege. */
1644 /** @todo: This is very similar to regular far calls; merge! */
1645
1646 /* Check stack first - may #SS(0). */
1647 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1648 * 16-bit code cause a two or four byte CS to be pushed? */
1649 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1650 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1651 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1652 &uPtrRet.pv, &uNewRsp);
1653 if (rcStrict != VINF_SUCCESS)
1654 return rcStrict;
1655
1656 /* Chop the high bits off if 16-bit gate (Intel says so). */
1657 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1658 uNewRip = (uint16_t)uNewRip;
1659
1660 /* Limit / canonical check. */
1661 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1662 if (!IEM_IS_LONG_MODE(pVCpu))
1663 {
1664 if (uNewRip > cbLimit)
1665 {
1666 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1667 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1668 }
1669 u64Base = X86DESC_BASE(&DescCS.Legacy);
1670 }
1671 else
1672 {
1673 if (!IEM_IS_CANONICAL(uNewRip))
1674 {
1675 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1676 return iemRaiseNotCanonical(pVCpu);
1677 }
1678 u64Base = 0;
1679 }
1680
1681 /*
1682 * Now set the accessed bit before
1683 * writing the return address to the stack and committing the result into
1684 * CS, CSHID and RIP.
1685 */
1686 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1687 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1688 {
1689 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692 /** @todo check what VT-x and AMD-V does. */
1693 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1694 }
1695
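 /* Same-CPL gate transfer: no stack switch and no parameter copying takes place; only the return CS:IP is pushed below. */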
1696 /* stack */
1697 if (!IEM_IS_LONG_MODE(pVCpu))
1698 {
1699 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1700 {
1701 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1702 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1703 }
1704 else
1705 {
1706 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1707 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1708 uPtrRet.pu16[1] = pCtx->cs.Sel;
1709 }
1710 }
1711 else
1712 {
1713 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1714 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1715 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1716 }
1717
1718 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1719 if (rcStrict != VINF_SUCCESS)
1720 return rcStrict;
1721
1722 /* commit */
1723 pCtx->rip = uNewRip;
1724 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1725 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
1726 pCtx->cs.ValidSel = pCtx->cs.Sel;
1727 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1728 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1729 pCtx->cs.u32Limit = cbLimit;
1730 pCtx->cs.u64Base = u64Base;
1731 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1732 }
1733 }
1734 pCtx->eflags.Bits.u1RF = 0;
1735
1736 /* Flush the prefetch buffer. */
1737# ifdef IEM_WITH_CODE_TLB
1738 pVCpu->iem.s.pbInstrBuf = NULL;
1739# else
1740 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1741# endif
1742 return VINF_SUCCESS;
1743#endif
1744}
1745
1746
1747/**
1748 * Implements far jumps and calls thru system selectors.
1749 *
1750 * @param uSel The selector.
1751 * @param enmBranch The kind of branching we're performing.
1752 * @param enmEffOpSize The effective operand size.
1753 * @param pDesc The descriptor corresponding to @a uSel.
1754 */
1755IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1756{
1757 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1758 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1759
1760 if (IEM_IS_LONG_MODE(pVCpu))
1761 switch (pDesc->Legacy.Gen.u4Type)
1762 {
1763 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1764 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1765
1766 default:
1767 case AMD64_SEL_TYPE_SYS_LDT:
1768 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1769 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1770 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1771 case AMD64_SEL_TYPE_SYS_INT_GATE:
1772 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1773 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1774 }
1775
1776 switch (pDesc->Legacy.Gen.u4Type)
1777 {
1778 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1779 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1780 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1781
1782 case X86_SEL_TYPE_SYS_TASK_GATE:
1783 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1784
1785 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1786 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1787 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1788
1789 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1790 Log(("branch %04x -> busy 286 TSS\n", uSel));
1791 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1792
1793 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1794 Log(("branch %04x -> busy 386 TSS\n", uSel));
1795 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1796
1797 default:
1798 case X86_SEL_TYPE_SYS_LDT:
1799 case X86_SEL_TYPE_SYS_286_INT_GATE:
1800 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1801 case X86_SEL_TYPE_SYS_386_INT_GATE:
1802 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1803 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1804 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1805 }
1806}
1807
1808
1809/**
1810 * Implements far jumps.
1811 *
1812 * @param uSel The selector.
1813 * @param offSeg The segment offset.
1814 * @param enmEffOpSize The effective operand size.
1815 */
1816IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1817{
1818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1819 NOREF(cbInstr);
1820 Assert(offSeg <= UINT32_MAX);
1821
1822 /*
1823 * Real mode and V8086 mode are easy. The only snag seems to be that
1824 * CS.limit doesn't change and the limit check is done against the current
1825 * limit.
1826 */
1827 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1828 * 1998) that up to and including the Intel 486, far control
1829 * transfers in real mode set default CS attributes (0x93) and also
1830 * set a 64K segment limit. Starting with the Pentium, the
1831 * attributes and limit are left alone but the access rights are
1832 * ignored. We only implement the Pentium+ behavior.
1833 * */
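 /* Example: a real mode 'jmp 0x2000:0x0100' arrives here with uSel=0x2000 and offSeg=0x100, loading CS.Sel=0x2000, CS.base=0x20000 and IP=0x100 while CS.limit is left untouched. */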
1834 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1835 {
1836 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1837 if (offSeg > pCtx->cs.u32Limit)
1838 {
1839 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1840 return iemRaiseGeneralProtectionFault0(pVCpu);
1841 }
1842
1843 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1844 pCtx->rip = offSeg;
1845 else
1846 pCtx->rip = offSeg & UINT16_MAX;
1847 pCtx->cs.Sel = uSel;
1848 pCtx->cs.ValidSel = uSel;
1849 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1850 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1851 pCtx->eflags.Bits.u1RF = 0;
1852 return VINF_SUCCESS;
1853 }
1854
1855 /*
1856 * Protected mode. Need to parse the specified descriptor...
1857 */
1858 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1859 {
1860 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1861 return iemRaiseGeneralProtectionFault0(pVCpu);
1862 }
1863
1864 /* Fetch the descriptor. */
1865 IEMSELDESC Desc;
1866 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1867 if (rcStrict != VINF_SUCCESS)
1868 return rcStrict;
1869
1870 /* Is it there? */
1871 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1872 {
1873 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1874 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1875 }
1876
1877 /*
1878 * Deal with it according to its type. We do the standard code selectors
1879 * here and dispatch the system selectors to worker functions.
1880 */
1881 if (!Desc.Legacy.Gen.u1DescType)
1882 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1883
1884 /* Only code segments. */
1885 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1886 {
1887 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1888 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1889 }
1890
1891 /* L vs D. */
1892 if ( Desc.Legacy.Gen.u1Long
1893 && Desc.Legacy.Gen.u1DefBig
1894 && IEM_IS_LONG_MODE(pVCpu))
1895 {
1896 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1897 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1898 }
1899
1900 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1901 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1902 {
1903 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1904 {
1905 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1906 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1907 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1908 }
1909 }
1910 else
1911 {
1912 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1913 {
1914 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1915 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1916 }
1917 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1918 {
1919 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1920 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1921 }
1922 }
1923
1924 /* Chop the high bits if 16-bit (Intel says so). */
1925 if (enmEffOpSize == IEMMODE_16BIT)
1926 offSeg &= UINT16_MAX;
1927
1928 /* Limit check. (Should alternatively check for non-canonical addresses
1929 here, but that is ruled out by offSeg being 32-bit, right?) */
1930 uint64_t u64Base;
1931 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1932 if (Desc.Legacy.Gen.u1Long)
1933 u64Base = 0;
1934 else
1935 {
1936 if (offSeg > cbLimit)
1937 {
1938 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1939 /** @todo: Intel says this is #GP(0)! */
1940 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1941 }
1942 u64Base = X86DESC_BASE(&Desc.Legacy);
1943 }
1944
1945 /*
1946 * Ok, everything checked out fine. Now set the accessed bit before
1947 * committing the result into CS, CSHID and RIP.
1948 */
1949 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1950 {
1951 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1952 if (rcStrict != VINF_SUCCESS)
1953 return rcStrict;
1954 /** @todo check what VT-x and AMD-V does. */
1955 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1956 }
1957
1958 /* commit */
1959 pCtx->rip = offSeg;
1960 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1961 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1962 pCtx->cs.ValidSel = pCtx->cs.Sel;
1963 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1964 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1965 pCtx->cs.u32Limit = cbLimit;
1966 pCtx->cs.u64Base = u64Base;
1967 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1968 pCtx->eflags.Bits.u1RF = 0;
1969 /** @todo check if the hidden bits are loaded correctly for 64-bit
1970 * mode. */
1971
1972 /* Flush the prefetch buffer. */
1973#ifdef IEM_WITH_CODE_TLB
1974 pVCpu->iem.s.pbInstrBuf = NULL;
1975#else
1976 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1977#endif
1978
1979 return VINF_SUCCESS;
1980}
1981
1982
1983/**
1984 * Implements far calls.
1985 *
1986 * This very similar to iemCImpl_FarJmp.
1987 *
1988 * @param uSel The selector.
1989 * @param offSeg The segment offset.
1990 * @param enmEffOpSize The operand size (in case we need it).
1991 */
1992IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1993{
1994 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1995 VBOXSTRICTRC rcStrict;
1996 uint64_t uNewRsp;
1997 RTPTRUNION uPtrRet;
1998
1999 /*
2000 * Real mode and V8086 mode are easy. The only snag seems to be that
2001 * CS.limit doesn't change and the limit check is done against the current
2002 * limit.
2003 */
2004 /** @todo See comment for similar code in iemCImpl_FarJmp */
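 /* Example: a 16-bit real mode 'call 0x1234:0x0010' pushes the 2-byte return IP and 2-byte CS below and then loads CS.base=0x12340, IP=0x0010. */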
2005 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2006 {
2007 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2008
2009 /* Check stack first - may #SS(0). */
2010 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2011 &uPtrRet.pv, &uNewRsp);
2012 if (rcStrict != VINF_SUCCESS)
2013 return rcStrict;
2014
2015 /* Check the target address range. */
2016 if (offSeg > UINT32_MAX)
2017 return iemRaiseGeneralProtectionFault0(pVCpu);
2018
2019 /* Everything is fine, push the return address. */
2020 if (enmEffOpSize == IEMMODE_16BIT)
2021 {
2022 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2023 uPtrRet.pu16[1] = pCtx->cs.Sel;
2024 }
2025 else
2026 {
2027 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2028 uPtrRet.pu16[2] = pCtx->cs.Sel;
2029 }
2030 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2031 if (rcStrict != VINF_SUCCESS)
2032 return rcStrict;
2033
2034 /* Branch. */
2035 pCtx->rip = offSeg;
2036 pCtx->cs.Sel = uSel;
2037 pCtx->cs.ValidSel = uSel;
2038 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2039 pCtx->cs.u64Base = (uint32_t)uSel << 4;
2040 pCtx->eflags.Bits.u1RF = 0;
2041 return VINF_SUCCESS;
2042 }
2043
2044 /*
2045 * Protected mode. Need to parse the specified descriptor...
2046 */
2047 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2048 {
2049 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2050 return iemRaiseGeneralProtectionFault0(pVCpu);
2051 }
2052
2053 /* Fetch the descriptor. */
2054 IEMSELDESC Desc;
2055 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2056 if (rcStrict != VINF_SUCCESS)
2057 return rcStrict;
2058
2059 /*
2060 * Deal with it according to its type. We do the standard code selectors
2061 * here and dispatch the system selectors to worker functions.
2062 */
2063 if (!Desc.Legacy.Gen.u1DescType)
2064 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2065
2066 /* Only code segments. */
2067 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2068 {
2069 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2070 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2071 }
2072
2073 /* L vs D. */
2074 if ( Desc.Legacy.Gen.u1Long
2075 && Desc.Legacy.Gen.u1DefBig
2076 && IEM_IS_LONG_MODE(pVCpu))
2077 {
2078 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2079 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2080 }
2081
2082 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
2083 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2084 {
2085 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2086 {
2087 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2088 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2089 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2090 }
2091 }
2092 else
2093 {
2094 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2095 {
2096 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2097 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2098 }
2099 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2100 {
2101 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2102 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2103 }
2104 }
2105
2106 /* Is it there? */
2107 if (!Desc.Legacy.Gen.u1Present)
2108 {
2109 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2110 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2111 }
2112
2113 /* Check stack first - may #SS(0). */
2114 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2115 * 16-bit code cause a two or four byte CS to be pushed? */
2116 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2117 enmEffOpSize == IEMMODE_64BIT ? 8+8
2118 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2119 &uPtrRet.pv, &uNewRsp);
2120 if (rcStrict != VINF_SUCCESS)
2121 return rcStrict;
2122
2123 /* Chop the high bits if 16-bit (Intel says so). */
2124 if (enmEffOpSize == IEMMODE_16BIT)
2125 offSeg &= UINT16_MAX;
2126
2127 /* Limit / canonical check. */
2128 uint64_t u64Base;
2129 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2130 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2131 {
2132 if (!IEM_IS_CANONICAL(offSeg))
2133 {
2134 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2135 return iemRaiseNotCanonical(pVCpu);
2136 }
2137 u64Base = 0;
2138 }
2139 else
2140 {
2141 if (offSeg > cbLimit)
2142 {
2143 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2144 /** @todo: Intel says this is #GP(0)! */
2145 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2146 }
2147 u64Base = X86DESC_BASE(&Desc.Legacy);
2148 }
2149
2150 /*
2151 * Now set the accessed bit before
2152 * writing the return address to the stack and committing the result into
2153 * CS, CSHID and RIP.
2154 */
2155 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2156 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2157 {
2158 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2159 if (rcStrict != VINF_SUCCESS)
2160 return rcStrict;
2161 /** @todo check what VT-x and AMD-V does. */
2162 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2163 }
2164
2165 /* stack */
2166 if (enmEffOpSize == IEMMODE_16BIT)
2167 {
2168 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
2169 uPtrRet.pu16[1] = pCtx->cs.Sel;
2170 }
2171 else if (enmEffOpSize == IEMMODE_32BIT)
2172 {
2173 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
2174 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2175 }
2176 else
2177 {
2178 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
2179 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2180 }
2181 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2182 if (rcStrict != VINF_SUCCESS)
2183 return rcStrict;
2184
2185 /* commit */
2186 pCtx->rip = offSeg;
2187 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2188 pCtx->cs.Sel |= pVCpu->iem.s.uCpl;
2189 pCtx->cs.ValidSel = pCtx->cs.Sel;
2190 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2191 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2192 pCtx->cs.u32Limit = cbLimit;
2193 pCtx->cs.u64Base = u64Base;
2194 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2195 pCtx->eflags.Bits.u1RF = 0;
2196 /** @todo check if the hidden bits are loaded correctly for 64-bit
2197 * mode. */
2198
2199 /* Flush the prefetch buffer. */
2200#ifdef IEM_WITH_CODE_TLB
2201 pVCpu->iem.s.pbInstrBuf = NULL;
2202#else
2203 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2204#endif
2205 return VINF_SUCCESS;
2206}
2207
2208
2209/**
2210 * Implements retf.
2211 *
2212 * @param enmEffOpSize The effective operand size.
2213 * @param cbPop The number of bytes of arguments to pop from the
2214 * stack.
2215 */
2216IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2217{
2218 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2219 VBOXSTRICTRC rcStrict;
2220 RTCPTRUNION uPtrFrame;
2221 uint64_t uNewRsp;
2222 uint64_t uNewRip;
2223 uint16_t uNewCs;
2224 NOREF(cbInstr);
2225
2226 /*
2227 * Read the stack values first.
2228 */
2229 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2230 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2231 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2232 if (rcStrict != VINF_SUCCESS)
2233 return rcStrict;
2234 if (enmEffOpSize == IEMMODE_16BIT)
2235 {
2236 uNewRip = uPtrFrame.pu16[0];
2237 uNewCs = uPtrFrame.pu16[1];
2238 }
2239 else if (enmEffOpSize == IEMMODE_32BIT)
2240 {
2241 uNewRip = uPtrFrame.pu32[0];
2242 uNewCs = uPtrFrame.pu16[2];
2243 }
2244 else
2245 {
2246 uNewRip = uPtrFrame.pu64[0];
2247 uNewCs = uPtrFrame.pu16[4];
2248 }
2249 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2250 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2251 { /* extremely likely */ }
2252 else
2253 return rcStrict;
2254
2255 /*
2256 * Real mode and V8086 mode are easy.
2257 */
2258 /** @todo See comment for similar code in iemCImpl_FarJmp */
2259 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2260 {
2261 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2262 /** @todo check how this is supposed to work if sp=0xfffe. */
2263
2264 /* Check the limit of the new EIP. */
2265 /** @todo Intel pseudo code only does the limit check for 16-bit
2266 * operands; AMD does not make any distinction. What is right? */
2267 if (uNewRip > pCtx->cs.u32Limit)
2268 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2269
2270 /* commit the operation. */
2271 pCtx->rsp = uNewRsp;
2272 pCtx->rip = uNewRip;
2273 pCtx->cs.Sel = uNewCs;
2274 pCtx->cs.ValidSel = uNewCs;
2275 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2276 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2277 pCtx->eflags.Bits.u1RF = 0;
2278 if (cbPop)
2279 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2280 return VINF_SUCCESS;
2281 }
2282
2283 /*
2284 * Protected mode is complicated, of course.
2285 */
2286 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2287 {
2288 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2289 return iemRaiseGeneralProtectionFault0(pVCpu);
2290 }
2291
2292 /* Fetch the descriptor. */
2293 IEMSELDESC DescCs;
2294 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2295 if (rcStrict != VINF_SUCCESS)
2296 return rcStrict;
2297
2298 /* Can only return to a code selector. */
2299 if ( !DescCs.Legacy.Gen.u1DescType
2300 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2301 {
2302 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2303 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2304 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2305 }
2306
2307 /* L vs D. */
2308 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2309 && DescCs.Legacy.Gen.u1DefBig
2310 && IEM_IS_LONG_MODE(pVCpu))
2311 {
2312 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2313 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2314 }
2315
2316 /* DPL/RPL/CPL checks. */
2317 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2318 {
2319 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2320 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2321 }
2322
2323 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2324 {
2325 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2326 {
2327 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2328 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2329 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2330 }
2331 }
2332 else
2333 {
2334 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2335 {
2336 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2337 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2338 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2339 }
2340 }
2341
2342 /* Is it there? */
2343 if (!DescCs.Legacy.Gen.u1Present)
2344 {
2345 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2346 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2347 }
2348
2349 /*
2350 * Return to outer privilege? (We'll typically have entered via a call gate.)
2351 */
2352 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2353 {
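 /* The complete far-return frame, from the current stack pointer upwards: return IP, return CS, cbPop bytes of parameters, then the outer SP and SS. The mapping below covers the parameters plus the outer SS:SP pair. */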
2354 /* Read the outer stack pointer stored *after* the parameters. */
2355 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop + cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2356 if (rcStrict != VINF_SUCCESS)
2357 return rcStrict;
2358
2359 uPtrFrame.pu8 += cbPop; /* Skip the parameters. */
2360
2361 uint16_t uNewOuterSs;
2362 uint64_t uNewOuterRsp;
2363 if (enmEffOpSize == IEMMODE_16BIT)
2364 {
2365 uNewOuterRsp = uPtrFrame.pu16[0];
2366 uNewOuterSs = uPtrFrame.pu16[1];
2367 }
2368 else if (enmEffOpSize == IEMMODE_32BIT)
2369 {
2370 uNewOuterRsp = uPtrFrame.pu32[0];
2371 uNewOuterSs = uPtrFrame.pu16[2];
2372 }
2373 else
2374 {
2375 uNewOuterRsp = uPtrFrame.pu64[0];
2376 uNewOuterSs = uPtrFrame.pu16[4];
2377 }
2378 uPtrFrame.pu8 -= cbPop; /* Put uPtrFrame back the way it was. */
2379 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2380 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2381 { /* extremely likely */ }
2382 else
2383 return rcStrict;
2384
2385 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2386 and read the selector. */
2387 IEMSELDESC DescSs;
2388 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2389 {
2390 if ( !DescCs.Legacy.Gen.u1Long
2391 || (uNewOuterSs & X86_SEL_RPL) == 3)
2392 {
2393 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2394 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2395 return iemRaiseGeneralProtectionFault0(pVCpu);
2396 }
2397 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2398 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2399 }
2400 else
2401 {
2402 /* Fetch the descriptor for the new stack segment. */
2403 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2404 if (rcStrict != VINF_SUCCESS)
2405 return rcStrict;
2406 }
2407
2408 /* Check that RPL of stack and code selectors match. */
2409 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2410 {
2411 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2412 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2413 }
2414
2415 /* Must be a writable data segment. */
2416 if ( !DescSs.Legacy.Gen.u1DescType
2417 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2418 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2419 {
2420 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2421 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2422 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2423 }
2424
2425 /* L vs D. (Not mentioned by intel.) */
2426 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2427 && DescSs.Legacy.Gen.u1DefBig
2428 && IEM_IS_LONG_MODE(pVCpu))
2429 {
2430 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2431 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2432 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2433 }
2434
2435 /* DPL/RPL/CPL checks. */
2436 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2437 {
2438 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2439 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2440 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2441 }
2442
2443 /* Is it there? */
2444 if (!DescSs.Legacy.Gen.u1Present)
2445 {
2446 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2447 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2448 }
2449
2450 /* Calc SS limit.*/
2451 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2452
2453 /* Is RIP canonical or within CS.limit? */
2454 uint64_t u64Base;
2455 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2456
2457 /** @todo Testcase: Is this correct? */
2458 if ( DescCs.Legacy.Gen.u1Long
2459 && IEM_IS_LONG_MODE(pVCpu) )
2460 {
2461 if (!IEM_IS_CANONICAL(uNewRip))
2462 {
2463 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2464 return iemRaiseNotCanonical(pVCpu);
2465 }
2466 u64Base = 0;
2467 }
2468 else
2469 {
2470 if (uNewRip > cbLimitCs)
2471 {
2472 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2473 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2474 /** @todo: Intel says this is #GP(0)! */
2475 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2476 }
2477 u64Base = X86DESC_BASE(&DescCs.Legacy);
2478 }
2479
2480 /*
2481 * Now set the accessed bit before committing the result into CS,
2482 * CSHID and RIP. (No return address is written here; this is a
2483 * return, so the frame has already been read above.)
2484 */
2485 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2486 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2487 {
2488 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2489 if (rcStrict != VINF_SUCCESS)
2490 return rcStrict;
2491 /** @todo check what VT-x and AMD-V does. */
2492 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2493 }
2494 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2495 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2496 {
2497 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2498 if (rcStrict != VINF_SUCCESS)
2499 return rcStrict;
2500 /** @todo check what VT-x and AMD-V does. */
2501 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2502 }
2503
2504 /* commit */
2505 if (enmEffOpSize == IEMMODE_16BIT)
2506 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2507 else
2508 pCtx->rip = uNewRip;
2509 pCtx->cs.Sel = uNewCs;
2510 pCtx->cs.ValidSel = uNewCs;
2511 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2512 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2513 pCtx->cs.u32Limit = cbLimitCs;
2514 pCtx->cs.u64Base = u64Base;
2515 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2516 pCtx->ss.Sel = uNewOuterSs;
2517 pCtx->ss.ValidSel = uNewOuterSs;
2518 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2519 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2520 pCtx->ss.u32Limit = cbLimitSs;
2521 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2522 pCtx->ss.u64Base = 0;
2523 else
2524 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2525 if (!pCtx->ss.Attr.n.u1DefBig)
2526 pCtx->sp = (uint16_t)uNewOuterRsp;
2527 else
2528 pCtx->rsp = uNewOuterRsp;
2529
2530 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2531 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2532 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2533 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2534 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2535
2536 /** @todo check if the hidden bits are loaded correctly for 64-bit
2537 * mode. */
2538
2539 if (cbPop)
2540 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2541 pCtx->eflags.Bits.u1RF = 0;
2542
2543 /* Done! */
2544 }
2545 /*
2546 * Return to the same privilege level
2547 */
2548 else
2549 {
2550 /* Limit / canonical check. */
2551 uint64_t u64Base;
2552 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2553
2554 /** @todo Testcase: Is this correct? */
2555 if ( DescCs.Legacy.Gen.u1Long
2556 && IEM_IS_LONG_MODE(pVCpu) )
2557 {
2558 if (!IEM_IS_CANONICAL(uNewRip))
2559 {
2560 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2561 return iemRaiseNotCanonical(pVCpu);
2562 }
2563 u64Base = 0;
2564 }
2565 else
2566 {
2567 if (uNewRip > cbLimitCs)
2568 {
2569 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2570 /** @todo: Intel says this is #GP(0)! */
2571 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2572 }
2573 u64Base = X86DESC_BASE(&DescCs.Legacy);
2574 }
2575
2576 /*
2577 * Now set the accessed bit before committing the result into CS,
2578 * CSHID and RIP. (No return address is written here; this is a
2579 * return, so the frame has already been read above.)
2580 */
2581 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2582 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2583 {
2584 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2585 if (rcStrict != VINF_SUCCESS)
2586 return rcStrict;
2587 /** @todo check what VT-x and AMD-V does. */
2588 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2589 }
2590
2591 /* commit */
2592 if (!pCtx->ss.Attr.n.u1DefBig)
2593 pCtx->sp = (uint16_t)uNewRsp;
2594 else
2595 pCtx->rsp = uNewRsp;
2596 if (enmEffOpSize == IEMMODE_16BIT)
2597 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2598 else
2599 pCtx->rip = uNewRip;
2600 pCtx->cs.Sel = uNewCs;
2601 pCtx->cs.ValidSel = uNewCs;
2602 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2603 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2604 pCtx->cs.u32Limit = cbLimitCs;
2605 pCtx->cs.u64Base = u64Base;
2606 /** @todo check if the hidden bits are loaded correctly for 64-bit
2607 * mode. */
2608 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
2609 if (cbPop)
2610 iemRegAddToRsp(pVCpu, pCtx, cbPop);
2611 pCtx->eflags.Bits.u1RF = 0;
2612 }
2613
2614 /* Flush the prefetch buffer. */
2615#ifdef IEM_WITH_CODE_TLB
2616 pVCpu->iem.s.pbInstrBuf = NULL;
2617#else
2618 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2619#endif
2620 return VINF_SUCCESS;
2621}
2622
2623
2624/**
2625 * Implements retn.
2626 *
2627 * We're doing this in C because of the \#GP that might be raised if the popped
2628 * program counter is out of bounds.
2629 *
2630 * @param enmEffOpSize The effective operand size.
2631 * @param cbPop The number of bytes of arguments to pop from the
2632 * stack.
2633 */
2634IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2635{
2636 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2637 NOREF(cbInstr);
2638
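 /* Example: 'ret 8' in 32-bit code arrives here with enmEffOpSize=IEMMODE_32BIT and cbPop=8, popping a 4-byte EIP and then discarding 8 bytes of arguments. */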
2639 /* Fetch the RSP from the stack. */
2640 VBOXSTRICTRC rcStrict;
2641 RTUINT64U NewRip;
2642 RTUINT64U NewRsp;
2643 NewRsp.u = pCtx->rsp;
2644
2645 switch (enmEffOpSize)
2646 {
2647 case IEMMODE_16BIT:
2648 NewRip.u = 0;
2649 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2650 break;
2651 case IEMMODE_32BIT:
2652 NewRip.u = 0;
2653 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2654 break;
2655 case IEMMODE_64BIT:
2656 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2657 break;
2658 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2659 }
2660 if (rcStrict != VINF_SUCCESS)
2661 return rcStrict;
2662
2663 /* Check the new RSP before loading it. */
2664 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2665 * of it. The canonical test is performed here and for call. */
2666 if (enmEffOpSize != IEMMODE_64BIT)
2667 {
2668 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
2669 {
2670 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
2671 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2672 }
2673 }
2674 else
2675 {
2676 if (!IEM_IS_CANONICAL(NewRip.u))
2677 {
2678 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2679 return iemRaiseNotCanonical(pVCpu);
2680 }
2681 }
2682
2683 /* Apply cbPop */
2684 if (cbPop)
2685 iemRegAddToRspEx(pVCpu, pCtx, &NewRsp, cbPop);
2686
2687 /* Commit it. */
2688 pCtx->rip = NewRip.u;
2689 pCtx->rsp = NewRsp.u;
2690 pCtx->eflags.Bits.u1RF = 0;
2691
2692 /* Flush the prefetch buffer. */
2693#ifndef IEM_WITH_CODE_TLB
2694 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2695#endif
2696
2697 return VINF_SUCCESS;
2698}
2699
2700
2701/**
2702 * Implements enter.
2703 *
2704 * We're doing this in C because the instruction is insane; even for the
2705 * u8NestingLevel=0 case, dealing with the stack is tedious.
2706 *
2707 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The frame size in bytes (subtracted from RSP for locals).
 * @param cParameters The number of stack parameters / nesting level (only
 * the low 5 bits are used).
2708 */
2709IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2710{
2711 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2712
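 /* In outline: push RBP, optionally copy the nesting-level entries and push the new frame pointer, then point RBP at the new frame and reserve cbFrame bytes of locals by lowering RSP. */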
2713 /* Push RBP, saving the old value in TmpRbp. */
2714 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
2715 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
2716 RTUINT64U NewRbp;
2717 VBOXSTRICTRC rcStrict;
2718 if (enmEffOpSize == IEMMODE_64BIT)
2719 {
2720 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2721 NewRbp = NewRsp;
2722 }
2723 else if (enmEffOpSize == IEMMODE_32BIT)
2724 {
2725 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2726 NewRbp = NewRsp;
2727 }
2728 else
2729 {
2730 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2731 NewRbp = TmpRbp;
2732 NewRbp.Words.w0 = NewRsp.Words.w0;
2733 }
2734 if (rcStrict != VINF_SUCCESS)
2735 return rcStrict;
2736
2737 /* Copy the parameters (aka nesting levels by Intel). */
2738 cParameters &= 0x1f;
2739 if (cParameters > 0)
2740 {
2741 switch (enmEffOpSize)
2742 {
2743 case IEMMODE_16BIT:
2744 if (pCtx->ss.Attr.n.u1DefBig)
2745 TmpRbp.DWords.dw0 -= 2;
2746 else
2747 TmpRbp.Words.w0 -= 2;
2748 do
2749 {
2750 uint16_t u16Tmp;
2751 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2752 if (rcStrict != VINF_SUCCESS)
2753 break;
2754 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2755 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2756 break;
2757
2758 case IEMMODE_32BIT:
2759 if (pCtx->ss.Attr.n.u1DefBig)
2760 TmpRbp.DWords.dw0 -= 4;
2761 else
2762 TmpRbp.Words.w0 -= 4;
2763 do
2764 {
2765 uint32_t u32Tmp;
2766 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2767 if (rcStrict != VINF_SUCCESS)
2768 break;
2769 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2770 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2771 break;
2772
2773 case IEMMODE_64BIT:
2774 TmpRbp.u -= 8;
2775 do
2776 {
2777 uint64_t u64Tmp;
2778 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2779 if (rcStrict != VINF_SUCCESS)
2780 break;
2781 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2782 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2783 break;
2784
2785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2786 }
2787 if (rcStrict != VINF_SUCCESS)
2788 return rcStrict;
2789
2790 /* Push the new RBP */
2791 if (enmEffOpSize == IEMMODE_64BIT)
2792 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2793 else if (enmEffOpSize == IEMMODE_32BIT)
2794 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2795 else
2796 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2797 if (rcStrict != VINF_SUCCESS)
2798 return rcStrict;
2799
2800 }
2801
2802 /* Recalc RSP. */
2803 iemRegSubFromRspEx(pVCpu, pCtx, &NewRsp, cbFrame);
2804
2805 /** @todo Should probe write access at the new RSP according to AMD. */
2806
2807 /* Commit it. */
2808 pCtx->rbp = NewRbp.u;
2809 pCtx->rsp = NewRsp.u;
2810 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2811
2812 return VINF_SUCCESS;
2813}
2814
2815
2816
2817/**
2818 * Implements leave.
2819 *
2820 * We're doing this in C because messing with the stack registers is annoying
2821 * since they depend on SS attributes.
2822 *
2823 * @param enmEffOpSize The effective operand size.
2824 */
2825IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2826{
2827 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2828
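 /* LEAVE undoes ENTER: the stack pointer is reloaded from the frame pointer (sized by the CPU mode / SS.D) and the saved RBP is then popped. */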
2829 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2830 RTUINT64U NewRsp;
2831 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2832 NewRsp.u = pCtx->rbp;
2833 else if (pCtx->ss.Attr.n.u1DefBig)
2834 NewRsp.u = pCtx->ebp;
2835 else
2836 {
2837 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2838 NewRsp.u = pCtx->rsp;
2839 NewRsp.Words.w0 = pCtx->bp;
2840 }
2841
2842 /* Pop RBP according to the operand size. */
2843 VBOXSTRICTRC rcStrict;
2844 RTUINT64U NewRbp;
2845 switch (enmEffOpSize)
2846 {
2847 case IEMMODE_16BIT:
2848 NewRbp.u = pCtx->rbp;
2849 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2850 break;
2851 case IEMMODE_32BIT:
2852 NewRbp.u = 0;
2853 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2854 break;
2855 case IEMMODE_64BIT:
2856 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2857 break;
2858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2859 }
2860 if (rcStrict != VINF_SUCCESS)
2861 return rcStrict;
2862
2863
2864 /* Commit it. */
2865 pCtx->rbp = NewRbp.u;
2866 pCtx->rsp = NewRsp.u;
2867 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2868
2869 return VINF_SUCCESS;
2870}
2871
2872
2873/**
2874 * Implements int3 and int XX.
2875 *
2876 * @param u8Int The interrupt vector number.
2877 * @param enmInt The int instruction type.
2878 */
2879IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2880{
2881 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2882 return iemRaiseXcptOrInt(pVCpu,
2883 cbInstr,
2884 u8Int,
2885 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2886 0,
2887 0);
2888}
2889
2890
2891/**
2892 * Implements iret for real mode and V8086 mode.
2893 *
2894 * @param enmEffOpSize The effective operand size.
2895 */
2896IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2897{
2898 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
2899 X86EFLAGS Efl;
2900 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
2901 NOREF(cbInstr);
2902
2903 /*
2904 * In V8086 mode with IOPL != 3, iret raises #GP(0) unless VME is enabled.
2905 */
2906 if ( Efl.Bits.u1VM
2907 && Efl.Bits.u2IOPL != 3
2908 && !(pCtx->cr4 & X86_CR4_VME))
2909 return iemRaiseGeneralProtectionFault0(pVCpu);
2910
2911 /*
2912 * Do the stack bits, but don't commit RSP before everything checks
2913 * out right.
2914 */
2915 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2916 VBOXSTRICTRC rcStrict;
2917 RTCPTRUNION uFrame;
2918 uint16_t uNewCs;
2919 uint32_t uNewEip;
2920 uint32_t uNewFlags;
2921 uint64_t uNewRsp;
2922 if (enmEffOpSize == IEMMODE_32BIT)
2923 {
2924 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
2925 if (rcStrict != VINF_SUCCESS)
2926 return rcStrict;
2927 uNewEip = uFrame.pu32[0];
2928 if (uNewEip > UINT16_MAX)
2929 return iemRaiseGeneralProtectionFault0(pVCpu);
2930
2931 uNewCs = (uint16_t)uFrame.pu32[1];
2932 uNewFlags = uFrame.pu32[2];
2933 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2934 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2935 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2936 | X86_EFL_ID;
2937 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2938 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2939 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2940 }
2941 else
2942 {
2943 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 uNewEip = uFrame.pu16[0];
2947 uNewCs = uFrame.pu16[1];
2948 uNewFlags = uFrame.pu16[2];
2949 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2950 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2951 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
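 /* I.e. a 16-bit IRET keeps the upper half of EFLAGS (except RF) and the fixed bit 1 as they were. */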
2952 /** @todo The intel pseudo code does not indicate what happens to
2953 * reserved flags. We just ignore them. */
2954 /* Ancient CPU adjustments: See iemCImpl_popf. */
2955 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2956 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2957 }
2958 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2959 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2960 { /* extremely likely */ }
2961 else
2962 return rcStrict;
2963
2964 /** @todo Check how this is supposed to work if sp=0xfffe. */
2965 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2966 uNewCs, uNewEip, uNewFlags, uNewRsp));
2967
2968 /*
2969 * Check the limit of the new EIP.
2970 */
2971 /** @todo Only the AMD pseudo code checks the limit here, what's
2972 * right? */
2973 if (uNewEip > pCtx->cs.u32Limit)
2974 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2975
2976 /*
2977 * V8086 checks and flag adjustments
2978 */
2979 if (Efl.Bits.u1VM)
2980 {
2981 if (Efl.Bits.u2IOPL == 3)
2982 {
2983 /* Preserve IOPL and clear RF. */
2984 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2985 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2986 }
2987 else if ( enmEffOpSize == IEMMODE_16BIT
2988 && ( !(uNewFlags & X86_EFL_IF)
2989 || !Efl.Bits.u1VIP )
2990 && !(uNewFlags & X86_EFL_TF) )
2991 {
2992 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2993 uNewFlags &= ~X86_EFL_VIF;
2994 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
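 /* IF is EFLAGS bit 9 and VIF is bit 19, hence the shift by 10 when copying IF into VIF. */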
2995 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2996 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2997 }
2998 else
2999 return iemRaiseGeneralProtectionFault0(pVCpu);
3000 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
3001 }
3002
3003 /*
3004 * Commit the operation.
3005 */
3006#ifdef DBGFTRACE_ENABLED
3007 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3008 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3009#endif
3010 pCtx->rsp = uNewRsp;
3011 pCtx->rip = uNewEip;
3012 pCtx->cs.Sel = uNewCs;
3013 pCtx->cs.ValidSel = uNewCs;
3014 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3015 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
3016 /** @todo do we load attribs and limit as well? */
3017 Assert(uNewFlags & X86_EFL_1);
3018 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3019
3020 /* Flush the prefetch buffer. */
3021#ifdef IEM_WITH_CODE_TLB
3022 pVCpu->iem.s.pbInstrBuf = NULL;
3023#else
3024 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3025#endif
3026
3027 return VINF_SUCCESS;
3028}
3029
3030
3031/**
3032 * Loads a segment register when entering V8086 mode.
3033 *
3034 * @param pSReg The segment register.
3035 * @param uSeg The segment to load.
3036 */
3037static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3038{
3039 pSReg->Sel = uSeg;
3040 pSReg->ValidSel = uSeg;
3041 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3042 pSReg->u64Base = (uint32_t)uSeg << 4;
3043 pSReg->u32Limit = 0xffff;
3044 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3045 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3046 * IRET'ing to V8086. */
3047}
3048
3049
3050/**
3051 * Implements iret for protected mode returning to V8086 mode.
3052 *
3053 * @param pCtx Pointer to the CPU context.
3054 * @param uNewEip The new EIP.
3055 * @param uNewCs The new CS.
3056 * @param uNewFlags The new EFLAGS.
3057 * @param uNewRsp The RSP after the initial IRET frame.
3058 *
3059 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3060 */
3061IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
3062 uint32_t, uNewFlags, uint64_t, uNewRsp)
3063{
3064 RT_NOREF_PV(cbInstr);
3065
3066 /*
3067 * Pop the V8086 specific frame bits off the stack.
3068 */
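 /* The remaining IRET-to-V86 frame is six dwords: ESP, SS, ES, DS, FS, GS (24 bytes). */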
3069 VBOXSTRICTRC rcStrict;
3070 RTCPTRUNION uFrame;
3071 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 24, &uFrame.pv, &uNewRsp);
3072 if (rcStrict != VINF_SUCCESS)
3073 return rcStrict;
3074 uint32_t uNewEsp = uFrame.pu32[0];
3075 uint16_t uNewSs = uFrame.pu32[1];
3076 uint16_t uNewEs = uFrame.pu32[2];
3077 uint16_t uNewDs = uFrame.pu32[3];
3078 uint16_t uNewFs = uFrame.pu32[4];
3079 uint16_t uNewGs = uFrame.pu32[5];
3080 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3081 if (rcStrict != VINF_SUCCESS)
3082 return rcStrict;
3083
3084 /*
3085 * Commit the operation.
3086 */
3087 uNewFlags &= X86_EFL_LIVE_MASK;
3088 uNewFlags |= X86_EFL_RA1_MASK;
3089#ifdef DBGFTRACE_ENABLED
3090 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3091 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3092#endif
3093 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3094
3095 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
3096 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
3097 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
3098 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
3099 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
3100 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
3101 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
3102 pCtx->rip = (uint16_t)uNewEip;
3103 pCtx->rsp = uNewEsp; /** @todo check this out! */
3104 pVCpu->iem.s.uCpl = 3;
3105
3106 /* Flush the prefetch buffer. */
3107#ifdef IEM_WITH_CODE_TLB
3108 pVCpu->iem.s.pbInstrBuf = NULL;
3109#else
3110 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3111#endif
3112
3113 return VINF_SUCCESS;
3114}
3115
3116
3117/**
3118 * Implements iret for protected mode returning via a nested task.
3119 *
3120 * @param enmEffOpSize The effective operand size.
3121 */
3122IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3123{
3124 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3125#ifndef IEM_IMPLEMENTS_TASKSWITCH
3126 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3127#else
3128 RT_NOREF_PV(enmEffOpSize);
3129
3130 /*
3131 * Read the segment selector in the link-field of the current TSS.
3132 */
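 /* The previous task link is the first word of the TSS, so it is read straight from the TSS base without an offset. */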
3133 RTSEL uSelRet;
3134 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3135 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
3136 if (rcStrict != VINF_SUCCESS)
3137 return rcStrict;
3138
3139 /*
3140 * Fetch the returning task's TSS descriptor from the GDT.
3141 */
3142 if (uSelRet & X86_SEL_LDT)
3143 {
3144 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3145 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3146 }
3147
3148 IEMSELDESC TssDesc;
3149 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3150 if (rcStrict != VINF_SUCCESS)
3151 return rcStrict;
3152
3153 if (TssDesc.Legacy.Gate.u1DescType)
3154 {
3155 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3156 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3157 }
3158
3159 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3160 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3161 {
3162 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3163 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3164 }
3165
3166 if (!TssDesc.Legacy.Gate.u1Present)
3167 {
3168 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3169 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3170 }
3171
3172 uint32_t uNextEip = pCtx->eip + cbInstr;
3173 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3174 0 /* uCr2 */, uSelRet, &TssDesc);
3175#endif
3176}
3177
3178
3179/**
3180 * Implements iret for protected mode.
3181 *
3182 * @param enmEffOpSize The effective operand size.
3183 */
3184IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3185{
3186 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3187 NOREF(cbInstr);
3188 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3189
3190 /*
3191 * Nested task return.
3192 */
3193 if (pCtx->eflags.Bits.u1NT)
3194 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3195
3196 /*
3197 * Normal return.
3198 *
3199 * Do the stack bits, but don't commit RSP before everything checks
3200 * out right.
3201 */
3202 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3203 VBOXSTRICTRC rcStrict;
3204 RTCPTRUNION uFrame;
3205 uint16_t uNewCs;
3206 uint32_t uNewEip;
3207 uint32_t uNewFlags;
3208 uint64_t uNewRsp;
3209 if (enmEffOpSize == IEMMODE_32BIT)
3210 {
3211 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
3212 if (rcStrict != VINF_SUCCESS)
3213 return rcStrict;
3214 uNewEip = uFrame.pu32[0];
3215 uNewCs = (uint16_t)uFrame.pu32[1];
3216 uNewFlags = uFrame.pu32[2];
3217 }
3218 else
3219 {
3220 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
3221 if (rcStrict != VINF_SUCCESS)
3222 return rcStrict;
3223 uNewEip = uFrame.pu16[0];
3224 uNewCs = uFrame.pu16[1];
3225 uNewFlags = uFrame.pu16[2];
3226 }
3227 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3228 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3229 { /* extremely likely */ }
3230 else
3231 return rcStrict;
3232 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3233
3234 /*
3235 * We're hopefully not returning to V8086 mode...
3236 */
3237 if ( (uNewFlags & X86_EFL_VM)
3238 && pVCpu->iem.s.uCpl == 0)
3239 {
3240 Assert(enmEffOpSize == IEMMODE_32BIT);
3241 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
3242 }
3243
3244 /*
3245 * Protected mode.
3246 */
3247 /* Read the CS descriptor. */
3248 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3249 {
3250 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3251 return iemRaiseGeneralProtectionFault0(pVCpu);
3252 }
3253
3254 IEMSELDESC DescCS;
3255 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3256 if (rcStrict != VINF_SUCCESS)
3257 {
3258 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3259 return rcStrict;
3260 }
3261
3262 /* Must be a code descriptor. */
3263 if (!DescCS.Legacy.Gen.u1DescType)
3264 {
3265 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3266 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3267 }
3268 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3269 {
3270 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3271 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3272 }
3273
3274#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3275 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
3276 PVM pVM = pVCpu->CTX_SUFF(pVM);
3277 if (EMIsRawRing0Enabled(pVM) && VM_IS_RAW_MODE_ENABLED(pVM))
3278 {
3279 if ((uNewCs & X86_SEL_RPL) == 1)
3280 {
3281 if ( pVCpu->iem.s.uCpl == 0
3282 && ( !EMIsRawRing1Enabled(pVM)
3283 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
3284 {
3285 Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
3286 uNewCs &= X86_SEL_MASK_OFF_RPL;
3287 }
3288# ifdef LOG_ENABLED
3289 else if (pVCpu->iem.s.uCpl <= 1 && EMIsRawRing1Enabled(pVM))
3290 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
3291# endif
3292 }
3293 else if ( (uNewCs & X86_SEL_RPL) == 2
3294 && EMIsRawRing1Enabled(pVM)
3295 && pVCpu->iem.s.uCpl <= 1)
3296 {
3297 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
3298 uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
3299 }
3300 }
3301#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
3302
3303
3304 /* Privilege checks. */
3305 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3306 {
3307 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3308 {
3309 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3310 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3311 }
3312 }
3313 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3314 {
3315 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3316 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3317 }
3318 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3319 {
3320 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3321 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3322 }
3323
3324 /* Present? */
3325 if (!DescCS.Legacy.Gen.u1Present)
3326 {
3327 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3328 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3329 }
3330
3331 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3332
3333 /*
3334 * Return to outer level?
3335 */
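 /* A privilege-level change means the IRET frame also holds the outer SS:ESP, which is read below before the SS descriptor checks. */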
3336 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3337 {
3338 uint16_t uNewSS;
3339 uint32_t uNewESP;
3340 if (enmEffOpSize == IEMMODE_32BIT)
3341 {
3342 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &uFrame.pv, &uNewRsp);
3343 if (rcStrict != VINF_SUCCESS)
3344 return rcStrict;
3345/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3346 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3347 * bit of the popped SS selector it turns out. */
3348 uNewESP = uFrame.pu32[0];
3349 uNewSS = (uint16_t)uFrame.pu32[1];
3350 }
3351 else
3352 {
3353 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 4, &uFrame.pv, &uNewRsp);
3354 if (rcStrict != VINF_SUCCESS)
3355 return rcStrict;
3356 uNewESP = uFrame.pu16[0];
3357 uNewSS = uFrame.pu16[1];
3358 }
3359 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3360 if (rcStrict != VINF_SUCCESS)
3361 return rcStrict;
3362 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3363
3364 /* Read the SS descriptor. */
3365 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3366 {
3367 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3368 return iemRaiseGeneralProtectionFault0(pVCpu);
3369 }
3370
3371 IEMSELDESC DescSS;
3372 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3373 if (rcStrict != VINF_SUCCESS)
3374 {
3375 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3376 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3377 return rcStrict;
3378 }
3379
3380 /* Privilege checks. */
3381 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3382 {
3383 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3384 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3385 }
3386 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3387 {
3388 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3389 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3390 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3391 }
3392
3393 /* Must be a writeable data segment descriptor. */
3394 if (!DescSS.Legacy.Gen.u1DescType)
3395 {
3396 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3397 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3398 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3399 }
3400 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3401 {
3402 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3403 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3404 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3405 }
3406
3407 /* Present? */
3408 if (!DescSS.Legacy.Gen.u1Present)
3409 {
3410 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3411 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3412 }
3413
3414 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3415
3416 /* Check EIP. */
3417 if (uNewEip > cbLimitCS)
3418 {
3419 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3420 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3421 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3422 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3423 }
3424
3425 /*
3426 * Commit the changes, marking CS and SS accessed first since
3427 * that may fail.
3428 */
3429 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3430 {
3431 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3432 if (rcStrict != VINF_SUCCESS)
3433 return rcStrict;
3434 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3435 }
3436 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3437 {
3438 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3439 if (rcStrict != VINF_SUCCESS)
3440 return rcStrict;
3441 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3442 }
3443
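 /* Build the mask of EFLAGS bits this IRET may restore: the arithmetic and
    control flags always; RF, AC and ID only for 32-bit operand sizes; IF,
    IOPL, VIF and VIP only at CPL 0; IF alone when CPL <= IOPL; and none of
    AC/ID/VIF/VIP on 386-class targets. */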
3444 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3445 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3446 if (enmEffOpSize != IEMMODE_16BIT)
3447 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3448 if (pVCpu->iem.s.uCpl == 0)
3449 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3450 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3451 fEFlagsMask |= X86_EFL_IF;
3452 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3453 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3454 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3455 fEFlagsNew &= ~fEFlagsMask;
3456 fEFlagsNew |= uNewFlags & fEFlagsMask;
3457#ifdef DBGFTRACE_ENABLED
3458 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3459 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
3460 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3461#endif
3462
3463 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3464 pCtx->rip = uNewEip;
3465 pCtx->cs.Sel = uNewCs;
3466 pCtx->cs.ValidSel = uNewCs;
3467 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3468 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3469 pCtx->cs.u32Limit = cbLimitCS;
3470 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3471 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3472
3473 pCtx->ss.Sel = uNewSS;
3474 pCtx->ss.ValidSel = uNewSS;
3475 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3476 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3477 pCtx->ss.u32Limit = cbLimitSs;
3478 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3479 if (!pCtx->ss.Attr.n.u1DefBig)
3480 pCtx->sp = (uint16_t)uNewESP;
3481 else
3482 pCtx->rsp = uNewESP;
3483
3484 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3485 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
3486 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
3487 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
3488 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
3489
3490 /* Done! */
3491
3492 }
3493 /*
3494 * Return to the same level.
3495 */
3496 else
3497 {
3498 /* Check EIP. */
3499 if (uNewEip > cbLimitCS)
3500 {
3501 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3502 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3503 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3504 }
3505
3506 /*
3507 * Commit the changes, marking CS first since it may fail.
3508 */
3509 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3510 {
3511 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3512 if (rcStrict != VINF_SUCCESS)
3513 return rcStrict;
3514 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3515 }
3516
3517 X86EFLAGS NewEfl;
3518 NewEfl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
3519 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3520 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3521 if (enmEffOpSize != IEMMODE_16BIT)
3522 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3523 if (pVCpu->iem.s.uCpl == 0)
3524 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3525 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3526 fEFlagsMask |= X86_EFL_IF;
3527 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3528 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3529 NewEfl.u &= ~fEFlagsMask;
3530 NewEfl.u |= fEFlagsMask & uNewFlags;
3531#ifdef DBGFTRACE_ENABLED
3532 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3533 pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip,
3534 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
3535#endif
3536
3537 IEMMISC_SET_EFL(pVCpu, pCtx, NewEfl.u);
3538 pCtx->rip = uNewEip;
3539 pCtx->cs.Sel = uNewCs;
3540 pCtx->cs.ValidSel = uNewCs;
3541 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3542 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3543 pCtx->cs.u32Limit = cbLimitCS;
3544 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3545 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3546 if (!pCtx->ss.Attr.n.u1DefBig)
3547 pCtx->sp = (uint16_t)uNewRsp;
3548 else
3549 pCtx->rsp = uNewRsp;
3550 /* Done! */
3551 }
3552
3553 /* Flush the prefetch buffer. */
3554#ifdef IEM_WITH_CODE_TLB
3555 pVCpu->iem.s.pbInstrBuf = NULL;
3556#else
3557 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3558#endif
3559
3560 return VINF_SUCCESS;
3561}
3562
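/* A purely illustrative sketch (not compiled, not part of IEM): the stack
   frame consumed by the protected mode IRET path above when returning to an
   outer privilege level, using a 32-bit operand size. The 16-bit variant uses
   word slots in the same order. The struct name is made up for this sketch. */
#if 0
typedef struct IRETFRAME32OUTER
{
    uint32_t uEip;      /* new EIP */
    uint32_t uCs;       /* new CS, only the low 16 bits are used */
    uint32_t uEflags;   /* new EFLAGS, filtered through fEFlagsMask above */
    uint32_t uEsp;      /* new ESP, popped by the outer-level path */
    uint32_t uSs;       /* new SS, only the low 16 bits are used */
} IRETFRAME32OUTER;
#endif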
3563
3564/**
3565 * Implements iret for long mode
3566 *
3567 * @param enmEffOpSize The effective operand size.
3568 */
3569IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3570{
3571 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3572 NOREF(cbInstr);
3573
3574 /*
3575 * Nested task return is not supported in long mode.
3576 */
3577 if (pCtx->eflags.Bits.u1NT)
3578 {
3579 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u));
3580 return iemRaiseGeneralProtectionFault0(pVCpu);
3581 }
3582
3583 /*
3584 * Normal return.
3585 *
3586 * Do the stack bits, but don't commit RSP before everything checks
3587 * out right.
3588 */
3589 VBOXSTRICTRC rcStrict;
3590 RTCPTRUNION uFrame;
3591 uint64_t uNewRip;
3592 uint16_t uNewCs;
3593 uint16_t uNewSs;
3594 uint32_t uNewFlags;
3595 uint64_t uNewRsp;
3596 if (enmEffOpSize == IEMMODE_64BIT)
3597 {
3598 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);
3599 if (rcStrict != VINF_SUCCESS)
3600 return rcStrict;
3601 uNewRip = uFrame.pu64[0];
3602 uNewCs = (uint16_t)uFrame.pu64[1];
3603 uNewFlags = (uint32_t)uFrame.pu64[2];
3604 uNewRsp = uFrame.pu64[3];
3605 uNewSs = (uint16_t)uFrame.pu64[4];
3606 }
3607 else if (enmEffOpSize == IEMMODE_32BIT)
3608 {
3609 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);
3610 if (rcStrict != VINF_SUCCESS)
3611 return rcStrict;
3612 uNewRip = uFrame.pu32[0];
3613 uNewCs = (uint16_t)uFrame.pu32[1];
3614 uNewFlags = uFrame.pu32[2];
3615 uNewRsp = uFrame.pu32[3];
3616 uNewSs = (uint16_t)uFrame.pu32[4];
3617 }
3618 else
3619 {
3620 Assert(enmEffOpSize == IEMMODE_16BIT);
3621 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);
3622 if (rcStrict != VINF_SUCCESS)
3623 return rcStrict;
3624 uNewRip = uFrame.pu16[0];
3625 uNewCs = uFrame.pu16[1];
3626 uNewFlags = uFrame.pu16[2];
3627 uNewRsp = uFrame.pu16[3];
3628 uNewSs = uFrame.pu16[4];
3629 }
3630 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3631 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3632 { /* extremely likely */ }
3633 else
3634 return rcStrict;
3635 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3636
3637 /*
3638 * Check stuff.
3639 */
3640 /* Read the CS descriptor. */
3641 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3642 {
3643 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3644 return iemRaiseGeneralProtectionFault0(pVCpu);
3645 }
3646
3647 IEMSELDESC DescCS;
3648 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3649 if (rcStrict != VINF_SUCCESS)
3650 {
3651 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3652 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3653 return rcStrict;
3654 }
3655
3656 /* Must be a code descriptor. */
3657 if ( !DescCS.Legacy.Gen.u1DescType
3658 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3659 {
3660 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment (DescType=%u Type=%#x) -> #GP\n",
3661 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3662 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3663 }
3664
3665 /* Privilege checks. */
3666 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3667 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3668 {
3669 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3670 {
3671 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3672 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3673 }
3674 }
3675 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3676 {
3677 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3678 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3679 }
3680 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3681 {
3682 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3683 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3684 }
3685
3686 /* Present? */
3687 if (!DescCS.Legacy.Gen.u1Present)
3688 {
3689 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3690 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3691 }
3692
3693 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3694
3695 /* Read the SS descriptor. */
3696 IEMSELDESC DescSS;
3697 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3698 {
3699 if ( !DescCS.Legacy.Gen.u1Long
3700 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3701 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3702 {
3703 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3704 return iemRaiseGeneralProtectionFault0(pVCpu);
3705 }
3706 DescSS.Legacy.u = 0;
3707 }
3708 else
3709 {
3710 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3711 if (rcStrict != VINF_SUCCESS)
3712 {
3713 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3714 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3715 return rcStrict;
3716 }
3717 }
3718
3719 /* Privilege checks. */
3720 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3721 {
3722 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3723 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3724 }
3725
3726 uint32_t cbLimitSs;
3727 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3728 cbLimitSs = UINT32_MAX;
3729 else
3730 {
3731 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3732 {
3733 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3734 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3735 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3736 }
3737
3738 /* Must be a writeable data segment descriptor. */
3739 if (!DescSS.Legacy.Gen.u1DescType)
3740 {
3741 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3742 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3743 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3744 }
3745 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3746 {
3747 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3748 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3749 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3750 }
3751
3752 /* Present? */
3753 if (!DescSS.Legacy.Gen.u1Present)
3754 {
3755 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3756 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3757 }
3758 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3759 }
3760
3761 /* Check EIP. */
3762 if (DescCS.Legacy.Gen.u1Long)
3763 {
3764 if (!IEM_IS_CANONICAL(uNewRip))
3765 {
3766 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3767 uNewCs, uNewRip, uNewSs, uNewRsp));
3768 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3769 }
3770 }
3771 else
3772 {
3773 if (uNewRip > cbLimitCS)
3774 {
3775 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3776 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3777 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3778 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3779 }
3780 }
3781
3782 /*
3783 * Commit the changes, marking CS and SS accessed first since
3784 * that may fail.
3785 */
3786 /** @todo where exactly are these actually marked accessed by a real CPU? */
3787 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3788 {
3789 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3790 if (rcStrict != VINF_SUCCESS)
3791 return rcStrict;
3792 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3793 }
3794 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3795 {
3796 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3797 if (rcStrict != VINF_SUCCESS)
3798 return rcStrict;
3799 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3800 }
3801
3802 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3803 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3804 if (enmEffOpSize != IEMMODE_16BIT)
3805 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3806 if (pVCpu->iem.s.uCpl == 0)
3807 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3808 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
3809 fEFlagsMask |= X86_EFL_IF;
3810 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
3811 fEFlagsNew &= ~fEFlagsMask;
3812 fEFlagsNew |= uNewFlags & fEFlagsMask;
3813#ifdef DBGFTRACE_ENABLED
3814 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3815 pVCpu->iem.s.uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3816#endif
3817
3818 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
3819 pCtx->rip = uNewRip;
3820 pCtx->cs.Sel = uNewCs;
3821 pCtx->cs.ValidSel = uNewCs;
3822 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3823 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3824 pCtx->cs.u32Limit = cbLimitCS;
3825 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3826 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
3827 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)
3828 pCtx->rsp = uNewRsp;
3829 else
3830 pCtx->sp = (uint16_t)uNewRsp;
3831 pCtx->ss.Sel = uNewSs;
3832 pCtx->ss.ValidSel = uNewSs;
3833 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3834 {
3835 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3836 pCtx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3837 pCtx->ss.u32Limit = UINT32_MAX;
3838 pCtx->ss.u64Base = 0;
3839 Log2(("iretq new SS: NULL\n"));
3840 }
3841 else
3842 {
3843 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3844 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3845 pCtx->ss.u32Limit = cbLimitSs;
3846 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3847 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
3848 }
3849
3850 if (pVCpu->iem.s.uCpl != uNewCpl)
3851 {
3852 pVCpu->iem.s.uCpl = uNewCpl;
3853 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->ds);
3854 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->es);
3855 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->fs);
3856 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->gs);
3857 }
3858
3859 /* Flush the prefetch buffer. */
3860#ifdef IEM_WITH_CODE_TLB
3861 pVCpu->iem.s.pbInstrBuf = NULL;
3862#else
3863 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3864#endif
3865
3866 return VINF_SUCCESS;
3867}
3868
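/* A purely illustrative sketch (not compiled, not part of IEM): the five
   slots popped by iemCImpl_iret_64bit above with a 64-bit operand size;
   32-bit and 16-bit operand sizes use 4- and 2-byte slots in the same order.
   The struct name is made up for this sketch. */
#if 0
typedef struct IRETFRAME64
{
    uint64_t uRip;      /* new RIP */
    uint64_t uCs;       /* new CS, low 16 bits hold the selector */
    uint64_t uRFlags;   /* new RFLAGS, low 32 bits are used */
    uint64_t uRsp;      /* new RSP */
    uint64_t uSs;       /* new SS, low 16 bits hold the selector */
} IRETFRAME64;
#endif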
3869
3870/**
3871 * Implements iret.
3872 *
3873 * @param enmEffOpSize The effective operand size.
3874 */
3875IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3876{
3877 /*
3878 * First, clear NMI blocking, if any, before causing any exceptions.
3879 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3880 */
3881 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3882
3883 /*
3884 * The SVM nested-guest intercept for iret takes priority over all exceptions,
3885 * see AMD spec. "15.9 Instruction Intercepts".
3886 */
3887 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3888 {
3889 Log(("iret: Guest intercept -> #VMEXIT\n"));
3890 IEM_SVM_UPDATE_NRIP(pVCpu);
3891 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3892 }
3893
3894 /*
3895 * Call a mode specific worker.
3896 */
3897 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3898 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3899 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3900 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3901 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3902}
3903
3904
3905/**
3906 * Implements SYSCALL (AMD and Intel64).
3909 */
3910IEM_CIMPL_DEF_0(iemCImpl_syscall)
3911{
3912 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
3913
3914 /*
3915 * Check preconditions.
3916 *
3917 * Note that CPUs described in the documentation may load a few odd values
3918 * into CS and SS than we allow here. This has yet to be checked on real
3919 * hardware.
3920 */
3921 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
3922 {
3923 Log(("syscall: Not enabled in EFER -> #UD\n"));
3924 return iemRaiseUndefinedOpcode(pVCpu);
3925 }
3926 if (!(pCtx->cr0 & X86_CR0_PE))
3927 {
3928 Log(("syscall: Protected mode is required -> #GP(0)\n"));
3929 return iemRaiseGeneralProtectionFault0(pVCpu);
3930 }
3931 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
3932 {
3933 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3934 return iemRaiseUndefinedOpcode(pVCpu);
3935 }
3936
3937 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
3938 /** @todo what about LDT selectors? Shouldn't matter, really. */
3939 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
3940 uint16_t uNewSs = uNewCs + 8;
3941 if (uNewCs == 0 || uNewSs == 0)
3942 {
3943 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
3944 return iemRaiseGeneralProtectionFault0(pVCpu);
3945 }
3946
3947 /* Long mode and legacy mode differs. */
3948 if (CPUMIsGuestInLongModeEx(pCtx))
3949 {
3950 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx->msrCSTAR;
3951
3952 /* This test isn't in the docs, but I'm not trusting the guys writing
3953 the MSRs to have validated the values as canonical like they should. */
3954 if (!IEM_IS_CANONICAL(uNewRip))
3955 {
3956 Log(("syscall: Only available in long mode on intel -> #UD\n"));
3957 return iemRaiseUndefinedOpcode(pVCpu);
3958 }
3959
3960 /*
3961 * Commit it.
3962 */
3963 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
3964 pCtx->rcx = pCtx->rip + cbInstr;
3965 pCtx->rip = uNewRip;
3966
3967 pCtx->rflags.u &= ~X86_EFL_RF;
3968 pCtx->r11 = pCtx->rflags.u;
3969 pCtx->rflags.u &= ~pCtx->msrSFMASK;
3970 pCtx->rflags.u |= X86_EFL_1;
3971
3972 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3973 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3974 }
3975 else
3976 {
3977 /*
3978 * Commit it.
3979 */
3980 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
3981 pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
3982 pCtx->rcx = pCtx->eip + cbInstr;
3983 pCtx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
3984 pCtx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
3985
3986 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
3987 pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
3988 }
3989 pCtx->cs.Sel = uNewCs;
3990 pCtx->cs.ValidSel = uNewCs;
3991 pCtx->cs.u64Base = 0;
3992 pCtx->cs.u32Limit = UINT32_MAX;
3993 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3994
3995 pCtx->ss.Sel = uNewSs;
3996 pCtx->ss.ValidSel = uNewSs;
3997 pCtx->ss.u64Base = 0;
3998 pCtx->ss.u32Limit = UINT32_MAX;
3999 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4000
4001 /* Flush the prefetch buffer. */
4002#ifdef IEM_WITH_CODE_TLB
4003 pVCpu->iem.s.pbInstrBuf = NULL;
4004#else
4005 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4006#endif
4007
4008 return VINF_SUCCESS;
4009}
4010
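/* A purely illustrative sketch (not compiled, not part of IEM) condensing the
   64-bit SYSCALL commit performed above; the helper name is made up. Note
   that no descriptor table is consulted - everything comes from the MSRs. */
#if 0
static void sketchSyscall64(PCPUMCTX pCtx, uint8_t cbInstr)
{
    uint16_t const uCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
    pCtx->rcx       = pCtx->rip + cbInstr;      /* return address */
    pCtx->rflags.u &= ~(uint64_t)X86_EFL_RF;
    pCtx->r11       = pCtx->rflags.u;           /* saved RFLAGS */
    pCtx->rflags.u &= ~pCtx->msrSFMASK;         /* mask flags for the handler */
    pCtx->rflags.u |= X86_EFL_1;
    pCtx->rip       = pCtx->msrLSTAR;           /* 64-bit entry point */
    pCtx->cs.Sel    = uCs;                      /* flat 64-bit CS from STAR[47:32] */
    pCtx->ss.Sel    = uCs + 8;                  /* SS follows CS in the GDT */
}
#endif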
4011
4012/**
4013 * Implements SYSRET (AMD and Intel64).
4014 */
4015IEM_CIMPL_DEF_0(iemCImpl_sysret)
4016
4017{
4018 RT_NOREF_PV(cbInstr);
4019 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4020
4021 /*
4022 * Check preconditions.
4023 *
4024 * Note that real CPUs may load a few odd values into CS and SS beyond what
4025 * we allow here. This has yet to be checked on real
4026 * hardware.
4027 */
4028 if (!(pCtx->msrEFER & MSR_K6_EFER_SCE))
4029 {
4030 Log(("sysret: Not enabled in EFER -> #UD\n"));
4031 return iemRaiseUndefinedOpcode(pVCpu);
4032 }
4033 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx))
4034 {
4035 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4036 return iemRaiseUndefinedOpcode(pVCpu);
4037 }
4038 if (!(pCtx->cr0 & X86_CR0_PE))
4039 {
4040 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4041 return iemRaiseGeneralProtectionFault0(pVCpu);
4042 }
4043 if (pVCpu->iem.s.uCpl != 0)
4044 {
4045 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4046 return iemRaiseGeneralProtectionFault0(pVCpu);
4047 }
4048
4049 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4050 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4051 uint16_t uNewSs = uNewCs + 8;
4052 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4053 uNewCs += 16;
4054 if (uNewCs == 0 || uNewSs == 0)
4055 {
4056 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4057 return iemRaiseGeneralProtectionFault0(pVCpu);
4058 }
4059
4060 /*
4061 * Commit it.
4062 */
4063 if (CPUMIsGuestInLongModeEx(pCtx))
4064 {
4065 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4066 {
4067 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
4068 pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));
4069 /* Note! We disregard the intel manual regarding the RCX canonical
4070 check, ask intel+xen why AMD doesn't do it. */
4071 pCtx->rip = pCtx->rcx;
4072 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4073 | (3 << X86DESCATTR_DPL_SHIFT);
4074 }
4075 else
4076 {
4077 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
4078 pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));
4079 pCtx->rip = pCtx->ecx;
4080 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4081 | (3 << X86DESCATTR_DPL_SHIFT);
4082 }
4083 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4084 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
4085 pCtx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4086 pCtx->rflags.u |= X86_EFL_1;
4087 }
4088 else
4089 {
4090 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
4091 pCtx->rip = pCtx->rcx;
4092 pCtx->rflags.u |= X86_EFL_IF;
4093 pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4094 | (3 << X86DESCATTR_DPL_SHIFT);
4095 }
4096 pCtx->cs.Sel = uNewCs | 3;
4097 pCtx->cs.ValidSel = uNewCs | 3;
4098 pCtx->cs.u64Base = 0;
4099 pCtx->cs.u32Limit = UINT32_MAX;
4100 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4101
4102 pCtx->ss.Sel = uNewSs | 3;
4103 pCtx->ss.ValidSel = uNewSs | 3;
4104 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4105 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4106 pCtx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4107 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4108 * on sysret. */
4109
4110 /* Flush the prefetch buffer. */
4111#ifdef IEM_WITH_CODE_TLB
4112 pVCpu->iem.s.pbInstrBuf = NULL;
4113#else
4114 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4115#endif
4116
4117 return VINF_SUCCESS;
4118}
4119
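/* A purely illustrative sketch (not compiled, not part of IEM) condensing the
   64-bit SYSRET commit performed above; the helper name is made up. */
#if 0
static void sketchSysret64(PCPUMCTX pCtx)
{
    uint16_t const uCsBase = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
    pCtx->rip      = pCtx->rcx;                 /* return address from RCX, no canonical check (AMD) */
    pCtx->rflags.u = (pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP)) | X86_EFL_1;
    pCtx->cs.Sel   = (uCsBase + 16) | 3;        /* 64-bit CS lives two slots up, forced to RPL 3 */
    pCtx->ss.Sel   = (uCsBase + 8)  | 3;        /* SS sits between the 32-bit and 64-bit CS slots */
}
#endif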
4120
4121/**
4122 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4123 *
4124 * @param iSegReg The segment register number (valid).
4125 * @param uSel The new selector value.
4126 */
4127IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4128{
4129 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4130 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4131 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4132
4133 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4134
4135 /*
4136 * Real mode and V8086 mode are easy.
4137 */
4138 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4139 {
4140 *pSel = uSel;
4141 pHid->u64Base = (uint32_t)uSel << 4;
4142 pHid->ValidSel = uSel;
4143 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4144#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4145 /** @todo Does the CPU actually load limits and attributes in the
4146 * real/V8086 mode segment load case? It doesn't for CS in far
4147 * jumps... Affects unreal mode. */
4148 pHid->u32Limit = 0xffff;
4149 pHid->Attr.u = 0;
4150 pHid->Attr.n.u1Present = 1;
4151 pHid->Attr.n.u1DescType = 1;
4152 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4153 ? X86_SEL_TYPE_RW
4154 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4155#endif
4156 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4157 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4158 return VINF_SUCCESS;
4159 }
4160
4161 /*
4162 * Protected mode.
4163 *
4164 * Check if it's a null segment selector value first, that's OK for DS, ES,
4165 * FS and GS. If not null, then we have to load and parse the descriptor.
4166 */
4167 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4168 {
4169 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4170 if (iSegReg == X86_SREG_SS)
4171 {
4172 /* In 64-bit kernel mode, the stack can be 0 because of the way
4173 interrupts are dispatched. AMD seems to have a slightly more
4174 relaxed relationship to SS.RPL than intel does. */
4175 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4176 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4177 || pVCpu->iem.s.uCpl > 2
4178 || ( uSel != pVCpu->iem.s.uCpl
4179 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4180 {
4181 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4182 return iemRaiseGeneralProtectionFault0(pVCpu);
4183 }
4184 }
4185
4186 *pSel = uSel; /* Not RPL, remember :-) */
4187 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4188 if (iSegReg == X86_SREG_SS)
4189 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4190
4191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4192 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4193
4194 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4195 return VINF_SUCCESS;
4196 }
4197
4198 /* Fetch the descriptor. */
4199 IEMSELDESC Desc;
4200 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4201 if (rcStrict != VINF_SUCCESS)
4202 return rcStrict;
4203
4204 /* Check GPs first. */
4205 if (!Desc.Legacy.Gen.u1DescType)
4206 {
4207 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4208 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4209 }
4210 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4211 {
4212 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4213 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4214 {
4215 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4216 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4217 }
4218 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4219 {
4220 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4221 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4222 }
4223 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4224 {
4225 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4226 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4227 }
4228 }
4229 else
4230 {
4231 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4232 {
4233 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4234 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4235 }
4236 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4237 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4238 {
4239#if 0 /* this is what intel says. */
4240 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4241 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4242 {
4243 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4244 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4245 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4246 }
4247#else /* this is what makes more sense. */
4248 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4249 {
4250 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4251 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4252 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4253 }
4254 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4255 {
4256 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4257 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4258 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4259 }
4260#endif
4261 }
4262 }
4263
4264 /* Is it there? */
4265 if (!Desc.Legacy.Gen.u1Present)
4266 {
4267 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4268 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4269 }
4270
4271 /* The base and limit. */
4272 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4273 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4274
4275 /*
4276 * Ok, everything checked out fine. Now set the accessed bit before
4277 * committing the result into the registers.
4278 */
4279 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4280 {
4281 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4282 if (rcStrict != VINF_SUCCESS)
4283 return rcStrict;
4284 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4285 }
4286
4287 /* commit */
4288 *pSel = uSel;
4289 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4290 pHid->u32Limit = cbLimit;
4291 pHid->u64Base = u64Base;
4292 pHid->ValidSel = uSel;
4293 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4294
4295 /** @todo check if the hidden bits are loaded correctly for 64-bit
4296 * mode. */
4297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4298
4299 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4300 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4301 return VINF_SUCCESS;
4302}
4303
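/* A purely illustrative sketch (not compiled, not part of IEM) of the check
   order implemented above for a protected mode DS/ES/FS/GS load with a
   non-NULL selector; the helper name is made up. SS is stricter: it must be
   a writable data segment with RPL == DPL == CPL and never accepts NULL. */
#if 0
static bool sketchIsDataSRegLoadOk(IEMSELDESC const *pDesc, uint16_t uSel, uint8_t uCpl)
{
    if (!pDesc->Legacy.Gen.u1DescType)              /* system descriptors -> #GP */
        return false;
    uint8_t const uType = pDesc->Legacy.Gen.u4Type;
    if ((uType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
        return false;                               /* execute-only code -> #GP */
    if ((uType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
    {
        /* Non-conforming: neither RPL nor CPL may exceed DPL. */
        if (   (uSel & X86_SEL_RPL) > pDesc->Legacy.Gen.u2Dpl
            || uCpl                 > pDesc->Legacy.Gen.u2Dpl)
            return false;
    }
    return pDesc->Legacy.Gen.u1Present != 0;        /* not-present raises #NP, not #GP */
}
#endif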
4304
4305/**
4306 * Implements 'mov SReg, r/m'.
4307 *
4308 * @param iSegReg The segment register number (valid).
4309 * @param uSel The new selector value.
4310 */
4311IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4312{
4313 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4314 if (rcStrict == VINF_SUCCESS)
4315 {
4316 if (iSegReg == X86_SREG_SS)
4317 {
4318 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4319 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4320 }
4321 }
4322 return rcStrict;
4323}
4324
4325
4326/**
4327 * Implements 'pop SReg'.
4328 *
4329 * @param iSegReg The segment register number (valid).
4330 * @param enmEffOpSize The efficient operand size (valid).
4331 */
4332IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4333{
4334 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4335 VBOXSTRICTRC rcStrict;
4336
4337 /*
4338 * Read the selector off the stack and join paths with mov ss, reg.
4339 */
4340 RTUINT64U TmpRsp;
4341 TmpRsp.u = pCtx->rsp;
4342 switch (enmEffOpSize)
4343 {
4344 case IEMMODE_16BIT:
4345 {
4346 uint16_t uSel;
4347 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4348 if (rcStrict == VINF_SUCCESS)
4349 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4350 break;
4351 }
4352
4353 case IEMMODE_32BIT:
4354 {
4355 uint32_t u32Value;
4356 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4357 if (rcStrict == VINF_SUCCESS)
4358 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4359 break;
4360 }
4361
4362 case IEMMODE_64BIT:
4363 {
4364 uint64_t u64Value;
4365 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4366 if (rcStrict == VINF_SUCCESS)
4367 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4368 break;
4369 }
4370 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4371 }
4372
4373 /*
4374 * Commit the stack on success.
4375 */
4376 if (rcStrict == VINF_SUCCESS)
4377 {
4378 pCtx->rsp = TmpRsp.u;
4379 if (iSegReg == X86_SREG_SS)
4380 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
4381 }
4382 return rcStrict;
4383}
4384
4385
4386/**
4387 * Implements lgs, lfs, les, lds & lss.
4388 */
4389IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4390 uint16_t, uSel,
4391 uint64_t, offSeg,
4392 uint8_t, iSegReg,
4393 uint8_t, iGReg,
4394 IEMMODE, enmEffOpSize)
4395{
4396 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
4397 VBOXSTRICTRC rcStrict;
4398
4399 /*
4400 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4401 */
4402 /** @todo verify and test that mov, pop and lXs perform the segment
4403 * register loading in the exact same way. */
4404 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4405 if (rcStrict == VINF_SUCCESS)
4406 {
4407 switch (enmEffOpSize)
4408 {
4409 case IEMMODE_16BIT:
4410 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4411 break;
4412 case IEMMODE_32BIT:
4413 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4414 break;
4415 case IEMMODE_64BIT:
4416 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4417 break;
4418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4419 }
4420 }
4421
4422 return rcStrict;
4423}
4424
4425
4426/**
4427 * Helper for VERR, VERW, LAR, and LSL that fetches the descriptor for the given selector.
4428 *
4429 * @retval VINF_SUCCESS on success.
4430 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4431 * @retval iemMemFetchSysU64 return value.
4432 *
4433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4434 * @param uSel The selector value.
4435 * @param fAllowSysDesc Whether system descriptors are OK or not.
4436 * @param pDesc Where to return the descriptor on success.
4437 */
4438static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPU pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4439{
4440 pDesc->Long.au64[0] = 0;
4441 pDesc->Long.au64[1] = 0;
4442
4443 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4444 return VINF_IEM_SELECTOR_NOT_OK;
4445
4446 /* Within the table limits? */
4447 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4448 RTGCPTR GCPtrBase;
4449 if (uSel & X86_SEL_LDT)
4450 {
4451 if ( !pCtx->ldtr.Attr.n.u1Present
4452 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
4453 return VINF_IEM_SELECTOR_NOT_OK;
4454 GCPtrBase = pCtx->ldtr.u64Base;
4455 }
4456 else
4457 {
4458 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
4459 return VINF_IEM_SELECTOR_NOT_OK;
4460 GCPtrBase = pCtx->gdtr.pGdt;
4461 }
4462
4463 /* Fetch the descriptor. */
4464 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4465 if (rcStrict != VINF_SUCCESS)
4466 return rcStrict;
4467 if (!pDesc->Legacy.Gen.u1DescType)
4468 {
4469 if (!fAllowSysDesc)
4470 return VINF_IEM_SELECTOR_NOT_OK;
4471 if (CPUMIsGuestInLongModeEx(pCtx))
4472 {
4473 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4474 if (rcStrict != VINF_SUCCESS)
4475 return rcStrict;
4476 }
4477
4478 }
4479
4480 return VINF_SUCCESS;
4481}
4482
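/* A purely illustrative sketch (not compiled, not part of IEM): the selector
   fields the helper above uses to pick the descriptor table and validate the
   limit; the helper name is made up. */
#if 0
static void sketchSelectorFields(uint16_t uSel)
{
    uint16_t const idxDesc = uSel >> 3;                     /* descriptor index */
    bool     const fLdt    = (uSel & X86_SEL_LDT) != 0;     /* table indicator, 1 = LDT */
    uint8_t  const uRpl    = uSel & X86_SEL_RPL;            /* requested privilege level */
    NOREF(idxDesc); NOREF(fLdt); NOREF(uRpl);
}
#endif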
4483
4484/**
4485 * Implements verr (fWrite = false) and verw (fWrite = true).
4486 */
4487IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4488{
4489 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4490
4491 /** @todo figure whether the accessed bit is set or not. */
4492
4493 bool fAccessible = true;
4494 IEMSELDESC Desc;
4495 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4496 if (rcStrict == VINF_SUCCESS)
4497 {
4498 /* Check the descriptor, order doesn't matter much here. */
4499 if ( !Desc.Legacy.Gen.u1DescType
4500 || !Desc.Legacy.Gen.u1Present)
4501 fAccessible = false;
4502 else
4503 {
4504 if ( fWrite
4505 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4506 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4507 fAccessible = false;
4508
4509 /** @todo testcase for the conforming behavior. */
4510 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4511 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4512 {
4513 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4514 fAccessible = false;
4515 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4516 fAccessible = false;
4517 }
4518 }
4519
4520 }
4521 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4522 fAccessible = false;
4523 else
4524 return rcStrict;
4525
4526 /* commit */
4527 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fAccessible;
4528
4529 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4530 return VINF_SUCCESS;
4531}
4532
4533
4534/**
4535 * Implements LAR and LSL with 64-bit operand size.
4536 *
4537 * @returns VINF_SUCCESS.
4538 * @param pu64Dst Pointer to the destination register.
4539 * @param uSel The selector to load details for.
4540 * @param fIsLar true = LAR, false = LSL.
4541 */
4542IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4543{
4544 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4545
4546 /** @todo figure whether the accessed bit is set or not. */
4547
4548 bool fDescOk = true;
4549 IEMSELDESC Desc;
4550 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4551 if (rcStrict == VINF_SUCCESS)
4552 {
4553 /*
4554 * Check the descriptor type.
4555 */
4556 if (!Desc.Legacy.Gen.u1DescType)
4557 {
4558 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4559 {
4560 if (Desc.Long.Gen.u5Zeros)
4561 fDescOk = false;
4562 else
4563 switch (Desc.Long.Gen.u4Type)
4564 {
4565 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4566 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4567 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4568 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4569 break;
4570 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4571 fDescOk = fIsLar;
4572 break;
4573 default:
4574 fDescOk = false;
4575 break;
4576 }
4577 }
4578 else
4579 {
4580 switch (Desc.Long.Gen.u4Type)
4581 {
4582 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4583 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4584 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4585 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4586 case X86_SEL_TYPE_SYS_LDT:
4587 break;
4588 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4589 case X86_SEL_TYPE_SYS_TASK_GATE:
4590 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4591 fDescOk = fIsLar;
4592 break;
4593 default:
4594 fDescOk = false;
4595 break;
4596 }
4597 }
4598 }
4599 if (fDescOk)
4600 {
4601 /*
4602 * Check the RPL/DPL/CPL interaction..
4603 */
4604 /** @todo testcase for the conforming behavior. */
4605 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4606 || !Desc.Legacy.Gen.u1DescType)
4607 {
4608 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4609 fDescOk = false;
4610 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4611 fDescOk = false;
4612 }
4613 }
4614
4615 if (fDescOk)
4616 {
4617 /*
4618 * All fine, start committing the result.
4619 */
4620 if (fIsLar)
4621 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4622 else
4623 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4624 }
4625
4626 }
4627 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4628 fDescOk = false;
4629 else
4630 return rcStrict;
4631
4632 /* commit flags value and advance rip. */
4633 IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fDescOk;
4634 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4635
4636 return VINF_SUCCESS;
4637}
4638
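/* A purely illustrative sketch (not compiled, not part of IEM) of the two
   result formats committed above; the helper names are made up. */
#if 0
static uint64_t sketchLarResult(IEMSELDESC const *pDesc)
{
    /* Access rights dword with the low byte and the base bits masked off. */
    return pDesc->Legacy.au32[1] & UINT32_C(0x00ffff00);
}
static uint64_t sketchLslResult(IEMSELDESC const *pDesc)
{
    /* Segment limit with the granularity bit applied. */
    return X86DESC_LIMIT_G(&pDesc->Legacy);
}
#endif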
4639
4640/**
4641 * Implements LAR and LSL with 16-bit operand size.
4642 *
4643 * @returns VINF_SUCCESS.
4644 * @param pu16Dst Pointer to the destination register.
4645 * @param u16Sel The selector to load details for.
4646 * @param fIsLar true = LAR, false = LSL.
4647 */
4648IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
4649{
4650 uint64_t u64TmpDst = *pu16Dst;
4651 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
4652 *pu16Dst = u64TmpDst;
4653 return VINF_SUCCESS;
4654}
4655
4656
4657/**
4658 * Implements lgdt.
4659 *
4660 * @param iEffSeg The segment of the new gdtr contents
4661 * @param GCPtrEffSrc The address of the new gdtr contents.
4662 * @param enmEffOpSize The effective operand size.
4663 */
4664IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4665{
4666 if (pVCpu->iem.s.uCpl != 0)
4667 return iemRaiseGeneralProtectionFault0(pVCpu);
4668 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4669
4670 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
4671 {
4672 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
4673 IEM_SVM_UPDATE_NRIP(pVCpu);
4674 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4675 }
4676
4677 /*
4678 * Fetch the limit and base address.
4679 */
4680 uint16_t cbLimit;
4681 RTGCPTR GCPtrBase;
4682 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4683 if (rcStrict == VINF_SUCCESS)
4684 {
4685 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4686 || X86_IS_CANONICAL(GCPtrBase))
4687 {
4688 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4689 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4690 else
4691 {
4692 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4693 pCtx->gdtr.cbGdt = cbLimit;
4694 pCtx->gdtr.pGdt = GCPtrBase;
4695 }
4696 if (rcStrict == VINF_SUCCESS)
4697 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4698 }
4699 else
4700 {
4701 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4702 return iemRaiseGeneralProtectionFault0(pVCpu);
4703 }
4704 }
4705 return rcStrict;
4706}
4707
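/* A purely illustrative sketch (not compiled, not part of IEM): the memory
   operand layout fetched by iemMemFetchDataXdtr above - a 16-bit limit
   followed by the base address (32 bits in legacy modes, 64 bits in long
   mode; a 16-bit operand size only uses the low 24 base bits). The struct
   name is made up. */
#if 0
#pragma pack(1)
typedef struct XDTRMEM
{
    uint16_t cbLimit;   /* inclusive table limit */
    uint64_t uBase;     /* linear base address (truncated as per mode/operand size) */
} XDTRMEM;
#pragma pack()
#endif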
4708
4709/**
4710 * Implements sgdt.
4711 *
4712 * @param iEffSeg The segment where to store the gdtr content.
4713 * @param GCPtrEffDst The address where to store the gdtr content.
4714 */
4715IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4716{
4717 /*
4718 * Join paths with sidt.
4719 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4720 * you really must know.
4721 */
4722 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
4723 {
4724 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
4725 IEM_SVM_UPDATE_NRIP(pVCpu);
4726 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4727 }
4728
4729 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4730 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
4731 if (rcStrict == VINF_SUCCESS)
4732 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4733 return rcStrict;
4734}
4735
4736
4737/**
4738 * Implements lidt.
4739 *
4740 * @param iEffSeg The segment of the new idtr contents
4741 * @param GCPtrEffSrc The address of the new idtr contents.
4742 * @param enmEffOpSize The effective operand size.
4743 */
4744IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4745{
4746 if (pVCpu->iem.s.uCpl != 0)
4747 return iemRaiseGeneralProtectionFault0(pVCpu);
4748 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
4749
4750 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
4751 {
4752 Log(("lidt: Guest intercept -> #VMEXIT\n"));
4753 IEM_SVM_UPDATE_NRIP(pVCpu);
4754 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4755 }
4756
4757 /*
4758 * Fetch the limit and base address.
4759 */
4760 uint16_t cbLimit;
4761 RTGCPTR GCPtrBase;
4762 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4763 if (rcStrict == VINF_SUCCESS)
4764 {
4765 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4766 || X86_IS_CANONICAL(GCPtrBase))
4767 {
4768 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4769 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4770 else
4771 {
4772 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4773 pCtx->idtr.cbIdt = cbLimit;
4774 pCtx->idtr.pIdt = GCPtrBase;
4775 }
4776 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4777 }
4778 else
4779 {
4780 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4781 return iemRaiseGeneralProtectionFault0(pVCpu);
4782 }
4783 }
4784 return rcStrict;
4785}
4786
4787
4788/**
4789 * Implements sidt.
4790 *
4791 * @param iEffSeg The segment where to store the idtr content.
4792 * @param GCPtrEffDst The address where to store the idtr content.
4793 */
4794IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4795{
4796 /*
4797 * Join paths with sgdt.
4798 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4799 * you really must know.
4800 */
4801 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
4802 {
4803 Log(("sidt: Guest intercept -> #VMEXIT\n"));
4804 IEM_SVM_UPDATE_NRIP(pVCpu);
4805 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4806 }
4807
4808 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4809 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
4810 if (rcStrict == VINF_SUCCESS)
4811 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4812 return rcStrict;
4813}
4814
4815
4816/**
4817 * Implements lldt.
4818 *
4819 * @param uNewLdt The new LDT selector value.
4820 */
4821IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4822{
4823 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4824
4825 /*
4826 * Check preconditions.
4827 */
4828 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4829 {
4830 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4831 return iemRaiseUndefinedOpcode(pVCpu);
4832 }
4833 if (pVCpu->iem.s.uCpl != 0)
4834 {
4835 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
4836 return iemRaiseGeneralProtectionFault0(pVCpu);
4837 }
4838 if (uNewLdt & X86_SEL_LDT)
4839 {
4840 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4841 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
4842 }
4843
4844 /*
4845 * Now, loading a NULL selector is easy.
4846 */
4847 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4848 {
4849 /* Nested-guest SVM intercept. */
4850 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4851 {
4852 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4853 IEM_SVM_UPDATE_NRIP(pVCpu);
4854 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4855 }
4856
4857 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4858 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4859 CPUMSetGuestLDTR(pVCpu, uNewLdt);
4860 else
4861 pCtx->ldtr.Sel = uNewLdt;
4862 pCtx->ldtr.ValidSel = uNewLdt;
4863 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4864 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4865 {
4866 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4867 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verification against REM. */
4868 }
4869 else if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4870 {
4871 /* AMD-V seems to leave the base and limit alone. */
4872 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4873 }
4874 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4875 {
4876 /* VT-x (Intel 3960x) seems to be doing the following. */
4877 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4878 pCtx->ldtr.u64Base = 0;
4879 pCtx->ldtr.u32Limit = UINT32_MAX;
4880 }
4881
4882 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4883 return VINF_SUCCESS;
4884 }
4885
4886 /*
4887 * Read the descriptor.
4888 */
4889 IEMSELDESC Desc;
4890 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4891 if (rcStrict != VINF_SUCCESS)
4892 return rcStrict;
4893
4894 /* Check GPs first. */
4895 if (Desc.Legacy.Gen.u1DescType)
4896 {
4897 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4898 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4899 }
4900 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4901 {
4902 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
4903 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4904 }
4905 uint64_t u64Base;
4906 if (!IEM_IS_LONG_MODE(pVCpu))
4907 u64Base = X86DESC_BASE(&Desc.Legacy);
4908 else
4909 {
4910 if (Desc.Long.Gen.u5Zeros)
4911 {
4912 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
4913 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4914 }
4915
4916 u64Base = X86DESC64_BASE(&Desc.Long);
4917 if (!IEM_IS_CANONICAL(u64Base))
4918 {
4919 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
4920 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4921 }
4922 }
4923
4924 /* NP */
4925 if (!Desc.Legacy.Gen.u1Present)
4926 {
4927 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
4928 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
4929 }
4930
4931 /* Nested-guest SVM intercept. */
4932 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4933 {
4934 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4935 IEM_SVM_UPDATE_NRIP(pVCpu);
4936 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4937 }
4938
4939 /*
4940 * It checks out alright, update the registers.
4941 */
4942/** @todo check if the actual value is loaded or if the RPL is dropped */
4943 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4944 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4945 else
4946 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4947 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
4948 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4949 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4950 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
4951 pCtx->ldtr.u64Base = u64Base;
4952
4953 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4954 return VINF_SUCCESS;
4955}
4956
4957
4958/**
4959 * Implements ltr.
4960 *
4961 * @param uNewTr The new task register (TSS) selector value.
4962 */
4963IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
4964{
4965 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4966
4967 /*
4968 * Check preconditions.
4969 */
4970 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4971 {
4972 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
4973 return iemRaiseUndefinedOpcode(pVCpu);
4974 }
4975 if (pVCpu->iem.s.uCpl != 0)
4976 {
4977 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
4978 return iemRaiseGeneralProtectionFault0(pVCpu);
4979 }
4980 if (uNewTr & X86_SEL_LDT)
4981 {
4982 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
4983 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
4984 }
4985 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
4986 {
4987 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
4988 return iemRaiseGeneralProtectionFault0(pVCpu);
4989 }
4990 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
4991 {
4992 Log(("ltr: Guest intercept -> #VMEXIT\n"));
4993 IEM_SVM_UPDATE_NRIP(pVCpu);
4994 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4995 }
4996
4997 /*
4998 * Read the descriptor.
4999 */
5000 IEMSELDESC Desc;
5001 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5002 if (rcStrict != VINF_SUCCESS)
5003 return rcStrict;
5004
5005 /* Check GPs first. */
5006 if (Desc.Legacy.Gen.u1DescType)
5007 {
5008 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5009 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5010 }
5011 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5012 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5013 || IEM_IS_LONG_MODE(pVCpu)) )
5014 {
5015 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5016 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5017 }
5018 uint64_t u64Base;
5019 if (!IEM_IS_LONG_MODE(pVCpu))
5020 u64Base = X86DESC_BASE(&Desc.Legacy);
5021 else
5022 {
5023 if (Desc.Long.Gen.u5Zeros)
5024 {
5025 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5026 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5027 }
5028
5029 u64Base = X86DESC64_BASE(&Desc.Long);
5030 if (!IEM_IS_CANONICAL(u64Base))
5031 {
5032 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5033 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5034 }
5035 }
5036
5037 /* NP */
5038 if (!Desc.Legacy.Gen.u1Present)
5039 {
5040 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5041 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5042 }
5043
5044 /*
5045 * Set it busy.
5046 * Note! Intel says this should lock down the whole descriptor, but we'll
5047 * restrict ourselves to 32-bit for now due to lack of inline
5048 * assembly and such.
5049 */
5050 void *pvDesc;
5051 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
5052 if (rcStrict != VINF_SUCCESS)
5053 return rcStrict;
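    /* The TSS busy flag is bit 1 of the descriptor type field, i.e. bit 41 of the
       8-byte descriptor; the switch below re-aligns the byte pointer (and adjusts
       the bit offset to match) so the atomic bit-set always hits an aligned address. */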
5054 switch ((uintptr_t)pvDesc & 3)
5055 {
5056 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5057 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5058 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5059 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5060 }
5061 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5062 if (rcStrict != VINF_SUCCESS)
5063 return rcStrict;
5064 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5065
5066 /*
5067 * It checks out alright, update the registers.
5068 */
5069/** @todo check if the actual value is loaded or if the RPL is dropped */
5070 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5071 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5072 else
5073 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
5074 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5075 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
5076 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5077 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5078 pCtx->tr.u64Base = u64Base;
5079
5080 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5081 return VINF_SUCCESS;
5082}
5083
5084
5085/**
5086 * Implements mov GReg,CRx.
5087 *
5088 * @param iGReg The general register to store the CRx value in.
5089 * @param iCrReg The CRx register to read (valid).
5090 */
5091IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5092{
5093 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5094 if (pVCpu->iem.s.uCpl != 0)
5095 return iemRaiseGeneralProtectionFault0(pVCpu);
5096 Assert(!pCtx->eflags.Bits.u1VM);
5097
5098 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5099 {
5100 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5101 IEM_SVM_UPDATE_NRIP(pVCpu);
5102 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5103 }
5104
5105 /* read it */
5106 uint64_t crX;
5107 switch (iCrReg)
5108 {
5109 case 0:
5110 crX = pCtx->cr0;
5111 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5112 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5113 break;
5114 case 2: crX = pCtx->cr2; break;
5115 case 3: crX = pCtx->cr3; break;
5116 case 4: crX = pCtx->cr4; break;
5117 case 8:
5118 {
5119#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5120 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5121 {
5122 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
5123 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
5124 {
5125 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5126 break;
5127 }
5128 }
5129#endif
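            /* CR8 mirrors bits 7:4 of the 8-bit APIC TPR, hence the shift by 4 below. */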
5130 uint8_t uTpr;
5131 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5132 if (RT_SUCCESS(rc))
5133 crX = uTpr >> 4;
5134 else
5135 crX = 0;
5136 break;
5137 }
5138 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5139 }
5140
5141 /* store it */
5142 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5143 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5144 else
5145 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5146
5147 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5148 return VINF_SUCCESS;
5149}
5150
5151
5152/**
5153 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5154 *
5155 * @param iCrReg The CRx register to write (valid).
5156 * @param uNewCrX The new value.
5157 * @param enmAccessCrx The instruction that caused the CrX load.
5158 * @param iGReg The general register in case of a 'mov CRx,GReg'
5159 * instruction.
5160 */
5161IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5162{
5163 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5164 VBOXSTRICTRC rcStrict;
5165 int rc;
5166#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5167 RT_NOREF2(iGReg, enmAccessCrX);
5168#endif
5169
5170 /*
5171 * Try to store it.
5172 * Unfortunately, CPUM only does a tiny bit of the work.
5173 */
5174 switch (iCrReg)
5175 {
5176 case 0:
5177 {
5178 /*
5179 * Perform checks.
5180 */
5181 uint64_t const uOldCrX = pCtx->cr0;
5182 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
5183 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
5184 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
5185
5186 /* ET is hardcoded on 486 and later. */
5187 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5188 uNewCrX |= X86_CR0_ET;
5189 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5190 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5191 {
5192 uNewCrX &= fValid;
5193 uNewCrX |= X86_CR0_ET;
5194 }
5195 else
5196 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5197
5198 /* Check for reserved bits. */
5199 if (uNewCrX & ~(uint64_t)fValid)
5200 {
5201 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5202 return iemRaiseGeneralProtectionFault0(pVCpu);
5203 }
5204
5205 /* Check for invalid combinations. */
5206 if ( (uNewCrX & X86_CR0_PG)
5207 && !(uNewCrX & X86_CR0_PE) )
5208 {
5209 Log(("Trying to set CR0.PG without CR0.PE\n"));
5210 return iemRaiseGeneralProtectionFault0(pVCpu);
5211 }
5212
5213 if ( !(uNewCrX & X86_CR0_CD)
5214 && (uNewCrX & X86_CR0_NW) )
5215 {
5216 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5217 return iemRaiseGeneralProtectionFault0(pVCpu);
5218 }
5219
5220 if ( !(uNewCrX & X86_CR0_PG)
5221 && (pCtx->cr4 & X86_CR4_PCIDE))
5222 {
5223 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5224 return iemRaiseGeneralProtectionFault0(pVCpu);
5225 }
5226
5227 /* Long mode consistency checks. */
5228 if ( (uNewCrX & X86_CR0_PG)
5229 && !(uOldCrX & X86_CR0_PG)
5230 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5231 {
5232 if (!(pCtx->cr4 & X86_CR4_PAE))
5233 {
5234 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
5235 return iemRaiseGeneralProtectionFault0(pVCpu);
5236 }
5237 if (pCtx->cs.Attr.n.u1Long)
5238 {
5239 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
5240 return iemRaiseGeneralProtectionFault0(pVCpu);
5241 }
5242 }
5243
5244 /** @todo check reserved PDPTR bits as AMD states. */
5245
5246 /*
5247 * SVM nested-guest CR0 write intercepts.
5248 */
5249 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5250 {
5251 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5252 IEM_SVM_UPDATE_NRIP(pVCpu);
5253 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5254 }
5255 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5256 {
5257 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5258 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5259 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5260 {
5261 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5262 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5263 IEM_SVM_UPDATE_NRIP(pVCpu);
5264 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5265 }
5266 }
5267
5268 /*
5269 * Change CR0.
5270 */
5271 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5272 CPUMSetGuestCR0(pVCpu, uNewCrX);
5273 else
5274 pCtx->cr0 = uNewCrX;
5275 Assert(pCtx->cr0 == uNewCrX);
5276
5277 /*
5278 * Change EFER.LMA if entering or leaving long mode.
5279 */
5280 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5281 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
5282 {
5283 uint64_t NewEFER = pCtx->msrEFER;
5284 if (uNewCrX & X86_CR0_PG)
5285 NewEFER |= MSR_K6_EFER_LMA;
5286 else
5287 NewEFER &= ~MSR_K6_EFER_LMA;
5288
5289 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5290 CPUMSetGuestEFER(pVCpu, NewEFER);
5291 else
5292 pCtx->msrEFER = NewEFER;
5293 Assert(pCtx->msrEFER == NewEFER);
5294 }
5295
5296 /*
5297 * Inform PGM.
5298 */
5299 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5300 {
5301 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
5302 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
5303 {
5304 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5305 AssertRCReturn(rc, rc);
5306 /* ignore informational status codes */
5307 }
5308 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5309 }
5310 else
5311 rcStrict = VINF_SUCCESS;
5312
5313#ifdef IN_RC
5314 /* Return to ring-3 for rescheduling if WP or AM changes. */
5315 if ( rcStrict == VINF_SUCCESS
5316 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
5317 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
5318 rcStrict = VINF_EM_RESCHEDULE;
5319#endif
5320 break;
5321 }
5322
5323 /*
5324 * CR2 can be changed without any restrictions.
5325 */
5326 case 2:
5327 {
5328 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5329 {
5330 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5331 IEM_SVM_UPDATE_NRIP(pVCpu);
5332 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5333 }
5334 pCtx->cr2 = uNewCrX;
5335 rcStrict = VINF_SUCCESS;
5336 break;
5337 }
5338
5339 /*
5340 * CR3 is relatively simple, although AMD and Intel have different
5341 * accounts of how setting reserved bits is handled. We take Intel's
5342 * word for the lower bits and AMD's for the high bits (63:52). The
5343 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5344 * on this.
5345 */
5346 /** @todo Testcase: Setting reserved bits in CR3, especially before
5347 * enabling paging. */
5348 case 3:
5349 {
5350 /* clear bit 63 from the source operand and indicate no invalidations are required. */
5351 if ( (pCtx->cr4 & X86_CR4_PCIDE)
5352 && (uNewCrX & RT_BIT_64(63)))
5353 {
5354 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
5355 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
5356 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
5357 * Paging-Structure Caches". */
5358 uNewCrX &= ~RT_BIT_64(63);
5359 }
5360
5361 /* check / mask the value. */
5362 if (uNewCrX & UINT64_C(0xfff0000000000000))
5363 {
5364 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5365 return iemRaiseGeneralProtectionFault0(pVCpu);
5366 }
5367
5368 uint64_t fValid;
5369 if ( (pCtx->cr4 & X86_CR4_PAE)
5370 && (pCtx->msrEFER & MSR_K6_EFER_LME))
5371 fValid = UINT64_C(0x000fffffffffffff);
5372 else
5373 fValid = UINT64_C(0xffffffff);
5374 if (uNewCrX & ~fValid)
5375 {
5376 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5377 uNewCrX, uNewCrX & ~fValid));
5378 uNewCrX &= fValid;
5379 }
5380
5381 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
5382 {
5383 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5384 IEM_SVM_UPDATE_NRIP(pVCpu);
5385 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
5386 }
5387
5388 /** @todo If we're in PAE mode we should check the PDPTRs for
5389 * invalid bits. */
5390
5391 /* Make the change. */
5392 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5393 {
5394 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5395 AssertRCSuccessReturn(rc, rc);
5396 }
5397 else
5398 pCtx->cr3 = uNewCrX;
5399
5400 /* Inform PGM. */
5401 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5402 {
5403 if (pCtx->cr0 & X86_CR0_PG)
5404 {
5405 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
5406 AssertRCReturn(rc, rc);
5407 /* ignore informational status codes */
5408 }
5409 }
5410 rcStrict = VINF_SUCCESS;
5411 break;
5412 }
5413
5414 /*
5415 * CR4 is a bit more tedious as there are bits which cannot be cleared
5416 * under some circumstances and such.
5417 */
5418 case 4:
5419 {
5420 uint64_t const uOldCrX = pCtx->cr4;
5421
5422 /** @todo Shouldn't this look at the guest CPUID bits to determine
5423 * valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
5424 * should #GP(0). */
5425 /* reserved bits */
5426 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
5427 | X86_CR4_TSD | X86_CR4_DE
5428 | X86_CR4_PSE | X86_CR4_PAE
5429 | X86_CR4_MCE | X86_CR4_PGE
5430 | X86_CR4_PCE | X86_CR4_OSFXSR
5431 | X86_CR4_OSXMMEEXCPT;
5432 //if (xxx)
5433 // fValid |= X86_CR4_VMXE;
5434 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
5435 fValid |= X86_CR4_OSXSAVE;
5436 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fPcid)
5437 fValid |= X86_CR4_PCIDE;
5438 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase)
5439 fValid |= X86_CR4_FSGSBASE;
5440 if (uNewCrX & ~(uint64_t)fValid)
5441 {
5442 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5443 return iemRaiseGeneralProtectionFault0(pVCpu);
5444 }
5445
5446 bool const fPcide = ((uNewCrX ^ uOldCrX) & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
5447 bool const fLongMode = CPUMIsGuestInLongModeEx(pCtx);
5448
5449 /* PCIDE check. */
5450 if ( fPcide
5451 && ( !fLongMode
5452 || (pCtx->cr3 & UINT64_C(0xfff))))
5453 {
5454 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pCtx->cr3 & UINT64_C(0xfff))));
5455 return iemRaiseGeneralProtectionFault0(pVCpu);
5456 }
5457
5458 /* PAE check. */
5459 if ( fLongMode
5460 && (uOldCrX & X86_CR4_PAE)
5461 && !(uNewCrX & X86_CR4_PAE))
5462 {
5463 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
5464 return iemRaiseGeneralProtectionFault0(pVCpu);
5465 }
5466
5467 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
5468 {
5469 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5470 IEM_SVM_UPDATE_NRIP(pVCpu);
5471 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
5472 }
5473
5474 /*
5475 * Change it.
5476 */
5477 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5478 {
5479 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5480 AssertRCSuccessReturn(rc, rc);
5481 }
5482 else
5483 pCtx->cr4 = uNewCrX;
5484 Assert(pCtx->cr4 == uNewCrX);
5485
5486 /*
5487 * Notify SELM and PGM.
5488 */
5489 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5490 {
5491 /* SELM - VME may change things wrt the TSS shadowing. */
5492 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5493 {
5494 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
5495 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5496#ifdef VBOX_WITH_RAW_MODE
5497 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
5498 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
5499#endif
5500 }
5501
5502 /* PGM - flushing and mode. */
5503 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
5504 {
5505 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
5506 AssertRCReturn(rc, rc);
5507 /* ignore informational status codes */
5508 }
5509 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
5510 }
5511 else
5512 rcStrict = VINF_SUCCESS;
5513 break;
5514 }
5515
5516 /*
5517 * CR8 maps to the APIC TPR.
5518 */
5519 case 8:
5520 {
5521 if (uNewCrX & ~(uint64_t)0xf)
5522 {
5523 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5524 return iemRaiseGeneralProtectionFault0(pVCpu);
5525 }
5526
5527#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5528 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5529 {
5530 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
5531 {
5532 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5533 IEM_SVM_UPDATE_NRIP(pVCpu);
5534 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
5535 }
5536
5537 PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
5538 pVmcbCtrl->IntCtrl.n.u8VTPR = uNewCrX;
5539 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
5540 {
5541 rcStrict = VINF_SUCCESS;
5542 break;
5543 }
5544 }
5545#endif
5546 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
5547 {
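                /* The 4-bit CR8 value becomes bits 7:4 of the 8-bit APIC TPR. */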
5548 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
5549 APICSetTpr(pVCpu, u8Tpr);
5550 }
5551 rcStrict = VINF_SUCCESS;
5552 break;
5553 }
5554
5555 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5556 }
5557
5558 /*
5559 * Advance the RIP on success.
5560 */
5561 if (RT_SUCCESS(rcStrict))
5562 {
5563 if (rcStrict != VINF_SUCCESS)
5564 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5565 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5566 }
5567
5568 return rcStrict;
5569}
5570
5571
5572/**
5573 * Implements mov CRx,GReg.
5574 *
5575 * @param iCrReg The CRx register to write (valid).
5576 * @param iGReg The general register to load the CRx value from.
5577 */
5578IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5579{
5580 if (pVCpu->iem.s.uCpl != 0)
5581 return iemRaiseGeneralProtectionFault0(pVCpu);
5582 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5583
5584 /*
5585 * Read the new value from the source register and call common worker.
5586 */
5587 uint64_t uNewCrX;
5588 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5589 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
5590 else
5591 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
5592 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
5593}
5594
5595
5596/**
5597 * Implements 'LMSW r/m16'
5598 *
5599 * @param u16NewMsw The new value.
5600 */
5601IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
5602{
5603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5604
5605 if (pVCpu->iem.s.uCpl != 0)
5606 return iemRaiseGeneralProtectionFault0(pVCpu);
5607 Assert(!pCtx->eflags.Bits.u1VM);
5608
5609 /*
5610 * Compose the new CR0 value and call common worker.
5611 */
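    /* LMSW only updates the low four CR0 bits (PE, MP, EM, TS); PE is kept from the
       old value below, so the instruction can set it but never clear it. */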
5612 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5613 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
5614 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
5615}
5616
5617
5618/**
5619 * Implements 'CLTS'.
5620 */
5621IEM_CIMPL_DEF_0(iemCImpl_clts)
5622{
5623 if (pVCpu->iem.s.uCpl != 0)
5624 return iemRaiseGeneralProtectionFault0(pVCpu);
5625
5626 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5627 uint64_t uNewCr0 = pCtx->cr0;
5628 uNewCr0 &= ~X86_CR0_TS;
5629 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
5630}
5631
5632
5633/**
5634 * Implements mov GReg,DRx.
5635 *
5636 * @param iGReg The general register to store the DRx value in.
5637 * @param iDrReg The DRx register to read (0-7).
5638 */
5639IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
5640{
5641 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5642
5643 /*
5644 * Check preconditions.
5645 */
5646
5647 /* Raise GPs. */
5648 if (pVCpu->iem.s.uCpl != 0)
5649 return iemRaiseGeneralProtectionFault0(pVCpu);
5650 Assert(!pCtx->eflags.Bits.u1VM);
5651
5652 if ( (iDrReg == 4 || iDrReg == 5)
5653 && (pCtx->cr4 & X86_CR4_DE) )
5654 {
5655 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
5656 return iemRaiseGeneralProtectionFault0(pVCpu);
5657 }
5658
5659 /* Raise #DB if general access detect is enabled. */
5660 if (pCtx->dr[7] & X86_DR7_GD)
5661 {
5662 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
5663 return iemRaiseDebugException(pVCpu);
5664 }
5665
5666 /*
5667 * Read the debug register and store it in the specified general register.
5668 */
5669 uint64_t drX;
5670 switch (iDrReg)
5671 {
5672 case 0: drX = pCtx->dr[0]; break;
5673 case 1: drX = pCtx->dr[1]; break;
5674 case 2: drX = pCtx->dr[2]; break;
5675 case 3: drX = pCtx->dr[3]; break;
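        /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear; the CR4.DE=1 case #GPs above. */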
5676 case 6:
5677 case 4:
5678 drX = pCtx->dr[6];
5679 drX |= X86_DR6_RA1_MASK;
5680 drX &= ~X86_DR6_RAZ_MASK;
5681 break;
5682 case 7:
5683 case 5:
5684 drX = pCtx->dr[7];
5685 drX |= X86_DR7_RA1_MASK;
5686 drX &= ~X86_DR7_RAZ_MASK;
5687 break;
5688 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5689 }
5690
5691 /** @todo SVM nested-guest intercept for DR8-DR15? */
5692 /*
5693 * Check for any SVM nested-guest intercepts for the DRx read.
5694 */
5695 if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
5696 {
5697 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
5698 IEM_SVM_UPDATE_NRIP(pVCpu);
5699 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
5700 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5701 }
5702
5703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5704 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
5705 else
5706 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
5707
5708 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5709 return VINF_SUCCESS;
5710}
5711
5712
5713/**
5714 * Implements mov DRx,GReg.
5715 *
5716 * @param iDrReg The DRx register to write (valid).
5717 * @param iGReg The general register to load the DRx value from.
5718 */
5719IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
5720{
5721 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5722
5723 /*
5724 * Check preconditions.
5725 */
5726 if (pVCpu->iem.s.uCpl != 0)
5727 return iemRaiseGeneralProtectionFault0(pVCpu);
5728 Assert(!pCtx->eflags.Bits.u1VM);
5729
5730 if (iDrReg == 4 || iDrReg == 5)
5731 {
5732 if (pCtx->cr4 & X86_CR4_DE)
5733 {
5734 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
5735 return iemRaiseGeneralProtectionFault0(pVCpu);
5736 }
5737 iDrReg += 2;
5738 }
5739
5740 /* Raise #DB if general access detect is enabled. */
5741 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
5742 * \#GP? */
5743 if (pCtx->dr[7] & X86_DR7_GD)
5744 {
5745 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
5746 return iemRaiseDebugException(pVCpu);
5747 }
5748
5749 /*
5750 * Read the new value from the source register.
5751 */
5752 uint64_t uNewDrX;
5753 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5754 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
5755 else
5756 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
5757
5758 /*
5759 * Adjust it.
5760 */
5761 switch (iDrReg)
5762 {
5763 case 0:
5764 case 1:
5765 case 2:
5766 case 3:
5767 /* nothing to adjust */
5768 break;
5769
5770 case 6:
5771 if (uNewDrX & X86_DR6_MBZ_MASK)
5772 {
5773 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5774 return iemRaiseGeneralProtectionFault0(pVCpu);
5775 }
5776 uNewDrX |= X86_DR6_RA1_MASK;
5777 uNewDrX &= ~X86_DR6_RAZ_MASK;
5778 break;
5779
5780 case 7:
5781 if (uNewDrX & X86_DR7_MBZ_MASK)
5782 {
5783 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
5784 return iemRaiseGeneralProtectionFault0(pVCpu);
5785 }
5786 uNewDrX |= X86_DR7_RA1_MASK;
5787 uNewDrX &= ~X86_DR7_RAZ_MASK;
5788 break;
5789
5790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5791 }
5792
5793 /** @todo SVM nested-guest intercept for DR8-DR15? */
5794 /*
5795 * Check for any SVM nested-guest intercepts for the DRx write.
5796 */
5797 if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
5798 {
5799 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
5800 IEM_SVM_UPDATE_NRIP(pVCpu);
5801 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
5802 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
5803 }
5804
5805 /*
5806 * Do the actual setting.
5807 */
5808 if (!IEM_VERIFICATION_ENABLED(pVCpu))
5809 {
5810 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
5811 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
5812 }
5813 else
5814 pCtx->dr[iDrReg] = uNewDrX;
5815
5816 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5817 return VINF_SUCCESS;
5818}
5819
5820
5821/**
5822 * Implements 'INVLPG m'.
5823 *
5824 * @param GCPtrPage The effective address of the page to invalidate.
5825 * @remarks Updates the RIP.
5826 */
5827IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
5828{
5829 /* ring-0 only. */
5830 if (pVCpu->iem.s.uCpl != 0)
5831 return iemRaiseGeneralProtectionFault0(pVCpu);
5832 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
5833
5834 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
5835 {
5836 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
5837 IEM_SVM_UPDATE_NRIP(pVCpu);
5838 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
5839 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
5840 }
5841
5842 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
5843 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5844
5845 if (rc == VINF_SUCCESS)
5846 return VINF_SUCCESS;
5847 if (rc == VINF_PGM_SYNC_CR3)
5848 return iemSetPassUpStatus(pVCpu, rc);
5849
5850 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
5851 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
5852 return rc;
5853}
5854
5855
5856/**
5857 * Implements INVPCID.
5858 *
5859 * @param uInvpcidType The invalidation type.
5860 * @param GCPtrInvpcidDesc The effective address of invpcid descriptor.
5861 * @remarks Updates the RIP.
5862 */
5863IEM_CIMPL_DEF_2(iemCImpl_invpcid, uint64_t, uInvpcidType, RTGCPTR, GCPtrInvpcidDesc)
5864{
5865 /*
5866 * Check preconditions.
5867 */
5868 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
5869 return iemRaiseUndefinedOpcode(pVCpu);
5870 if (pVCpu->iem.s.uCpl != 0)
5871 {
5872 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
5873 return iemRaiseGeneralProtectionFault0(pVCpu);
5874 }
5875 if (IEM_IS_V86_MODE(pVCpu))
5876 {
5877 Log(("invpcid: v8086 mode -> #GP(0)\n"));
5878 return iemRaiseGeneralProtectionFault0(pVCpu);
5879 }
5880 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
5881 {
5882 Log(("invpcid: invalid/unrecognized invpcid type %#x -> #GP(0)\n", uInvpcidType));
5883 return iemRaiseGeneralProtectionFault0(pVCpu);
5884 }
5885
5886 /*
5887 * Fetch the invpcid descriptor from guest memory.
5888 */
5889 RTUINT128U uDesc;
5890 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, pVCpu->iem.s.iEffSeg, GCPtrInvpcidDesc);
5891 if (rcStrict == VINF_SUCCESS)
5892 {
5893 /*
5894 * Validate the descriptor.
5895 */
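        /* Descriptor layout: bits 11:0 of the low quadword hold the PCID, bits 63:12 are
           reserved (MBZ), and the high quadword holds the linear address. */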
5896 if (uDesc.s.Lo > 0xfff)
5897 {
5898 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
5899 return iemRaiseGeneralProtectionFault0(pVCpu);
5900 }
5901
5902 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
5903 uint16_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff); /* PCIDs are 12 bits wide; don't truncate to 8 bits. */
5904 uint32_t const uCr4 = IEM_GET_CTX(pVCpu)->cr4;
5905 uint64_t const uCr3 = IEM_GET_CTX(pVCpu)->cr3;
5906 switch (uInvpcidType)
5907 {
5908 case X86_INVPCID_TYPE_INDV_ADDR:
5909 {
5910 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
5911 {
5912 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
5913 return iemRaiseGeneralProtectionFault0(pVCpu);
5914 }
5915 if ( !(uCr4 & X86_CR4_PCIDE)
5916 && uPcid != 0)
5917 {
5918 Log(("invpcid: invalid pcid %#x\n", uPcid));
5919 return iemRaiseGeneralProtectionFault0(pVCpu);
5920 }
5921
5922 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
5923 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
5924 break;
5925 }
5926
5927 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
5928 {
5929 if ( !(uCr4 & X86_CR4_PCIDE)
5930 && uPcid != 0)
5931 {
5932 Log(("invpcid: invalid pcid %#x\n", uPcid));
5933 return iemRaiseGeneralProtectionFault0(pVCpu);
5934 }
5935 /* Invalidate all mappings associated with PCID except global translations. */
5936 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
5937 break;
5938 }
5939
5940 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
5941 {
5942 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
5943 break;
5944 }
5945
5946 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
5947 {
5948 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
5949 break;
5950 }
5951 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5952 }
5953 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5954 }
5955 return rcStrict;
5956}
5957
5958
5959/**
5960 * Implements RDTSC.
5961 */
5962IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
5963{
5964 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5965
5966 /*
5967 * Check preconditions.
5968 */
5969 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
5970 return iemRaiseUndefinedOpcode(pVCpu);
5971
5972 if ( (pCtx->cr4 & X86_CR4_TSD)
5973 && pVCpu->iem.s.uCpl != 0)
5974 {
5975 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5976 return iemRaiseGeneralProtectionFault0(pVCpu);
5977 }
5978
5979 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
5980 {
5981 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
5982 IEM_SVM_UPDATE_NRIP(pVCpu);
5983 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5984 }
5985
5986 /*
5987 * Do the job.
5988 */
5989 uint64_t uTicks = TMCpuTickGet(pVCpu);
5990#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5991 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
5992#endif
5993 pCtx->rax = RT_LO_U32(uTicks);
5994 pCtx->rdx = RT_HI_U32(uTicks);
5995#ifdef IEM_VERIFICATION_MODE_FULL
5996 pVCpu->iem.s.fIgnoreRaxRdx = true;
5997#endif
5998
5999 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6000 return VINF_SUCCESS;
6001}
6002
6003
6004/**
6005 * Implements RDTSCP.
6006 */
6007IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
6008{
6009 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6010
6011 /*
6012 * Check preconditions.
6013 */
6014 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
6015 return iemRaiseUndefinedOpcode(pVCpu);
6016
6017 if ( (pCtx->cr4 & X86_CR4_TSD)
6018 && pVCpu->iem.s.uCpl != 0)
6019 {
6020 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6021 return iemRaiseGeneralProtectionFault0(pVCpu);
6022 }
6023
6024 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
6025 {
6026 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
6027 IEM_SVM_UPDATE_NRIP(pVCpu);
6028 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6029 }
6030
6031 /*
6032 * Do the job.
6033 * Query the MSR first in case of trips to ring-3.
6034 */
6035 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
6036 if (rcStrict == VINF_SUCCESS)
6037 {
6038 /* Low dword of the TSC_AUX msr only. */
6039 pCtx->rcx &= UINT32_C(0xffffffff);
6040
6041 uint64_t uTicks = TMCpuTickGet(pVCpu);
6042#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6043 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6044#endif
6045 pCtx->rax = RT_LO_U32(uTicks);
6046 pCtx->rdx = RT_HI_U32(uTicks);
6047#ifdef IEM_VERIFICATION_MODE_FULL
6048 pVCpu->iem.s.fIgnoreRaxRdx = true;
6049#endif
6050 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6051 }
6052 return rcStrict;
6053}
6054
6055
6056/**
6057 * Implements RDPMC.
6058 */
6059IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
6060{
6061 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6062 if ( pVCpu->iem.s.uCpl != 0
6063 && !(pCtx->cr4 & X86_CR4_PCE))
6064 return iemRaiseGeneralProtectionFault0(pVCpu);
6065
6066 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
6067 {
6068 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
6069 IEM_SVM_UPDATE_NRIP(pVCpu);
6070 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6071 }
6072
6073 /** @todo Implement RDPMC for the regular guest execution case (the above only
6074 * handles nested-guest intercepts). */
6075 RT_NOREF(cbInstr);
6076 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
6077}
6078
6079
6080/**
6081 * Implements RDMSR.
6082 */
6083IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
6084{
6085 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6086
6087 /*
6088 * Check preconditions.
6089 */
6090 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
6091 return iemRaiseUndefinedOpcode(pVCpu);
6092 if (pVCpu->iem.s.uCpl != 0)
6093 return iemRaiseGeneralProtectionFault0(pVCpu);
6094
6095 /*
6096 * Do the job.
6097 */
6098 RTUINT64U uValue;
6099 VBOXSTRICTRC rcStrict;
6100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6101 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
6102 {
6103 rcStrict = iemSvmHandleMsrIntercept(pVCpu, pCtx, pCtx->ecx, false /* fWrite */);
6104 if (rcStrict == VINF_SVM_VMEXIT)
6105 return VINF_SUCCESS;
6106 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6107 {
6108 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
6109 return rcStrict;
6110 }
6111 }
6112#endif
6113
6114 rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
6115 if (rcStrict == VINF_SUCCESS)
6116 {
6117 pCtx->rax = uValue.s.Lo;
6118 pCtx->rdx = uValue.s.Hi;
6119
6120 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6121 return VINF_SUCCESS;
6122 }
6123
6124#ifndef IN_RING3
6125 /* Deferred to ring-3. */
6126 if (rcStrict == VINF_CPUM_R3_MSR_READ)
6127 {
6128 Log(("IEM: rdmsr(%#x) -> ring-3\n", pCtx->ecx));
6129 return rcStrict;
6130 }
6131#else /* IN_RING3 */
6132 /* Often an unimplemented MSR or MSR bit, so worth logging. */
6133 static uint32_t s_cTimes = 0;
6134 if (s_cTimes++ < 10)
6135 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
6136 else
6137#endif
6138 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx));
6139 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6140 return iemRaiseGeneralProtectionFault0(pVCpu);
6141}
6142
6143
6144/**
6145 * Implements WRMSR.
6146 */
6147IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
6148{
6149 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6150
6151 /*
6152 * Check preconditions.
6153 */
6154 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
6155 return iemRaiseUndefinedOpcode(pVCpu);
6156 if (pVCpu->iem.s.uCpl != 0)
6157 return iemRaiseGeneralProtectionFault0(pVCpu);
6158
6159 /*
6160 * Do the job.
6161 */
6162 RTUINT64U uValue;
6163 uValue.s.Lo = pCtx->eax;
6164 uValue.s.Hi = pCtx->edx;
6165
6166 VBOXSTRICTRC rcStrict;
6167#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6168 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
6169 {
6170 rcStrict = iemSvmHandleMsrIntercept(pVCpu, pCtx, pCtx->ecx, true /* fWrite */);
6171 if (rcStrict == VINF_SVM_VMEXIT)
6172 return VINF_SUCCESS;
6173 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6174 {
6175 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict)));
6176 return rcStrict;
6177 }
6178 }
6179#endif
6180
6181 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6182 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
6183 else
6184 {
6185#ifdef IN_RING3
6186 CPUMCTX CtxTmp = *pCtx;
6187 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
6188 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(pVCpu);
6189 *pCtx = *pCtx2;
6190 *pCtx2 = CtxTmp;
6191#else
6192 AssertReleaseFailedReturn(VERR_IEM_IPE_2);
6193#endif
6194 }
6195 if (rcStrict == VINF_SUCCESS)
6196 {
6197 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6198 return VINF_SUCCESS;
6199 }
6200
6201#ifndef IN_RING3
6202 /* Deferred to ring-3. */
6203 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
6204 {
6205 Log(("IEM: wrmsr(%#x) -> ring-3\n", pCtx->ecx));
6206 return rcStrict;
6207 }
6208#else /* IN_RING3 */
6209 /* Often an unimplemented MSR or MSR bit, so worth logging. */
6210 static uint32_t s_cTimes = 0;
6211 if (s_cTimes++ < 10)
6212 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
6213 else
6214#endif
6215 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
6216 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6217 return iemRaiseGeneralProtectionFault0(pVCpu);
6218}
6219
6220
6221/**
6222 * Implements 'IN eAX, port'.
6223 *
6224 * @param u16Port The source port.
6225 * @param cbReg The register size.
6226 */
6227IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
6228{
6229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6230
6231 /*
6232 * CPL check
6233 */
6234 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6235 if (rcStrict != VINF_SUCCESS)
6236 return rcStrict;
6237
6238 /*
6239 * Check SVM nested-guest IO intercept.
6240 */
6241#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6242 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6243 {
6244 uint8_t cAddrSizeBits;
6245 switch (pVCpu->iem.s.enmEffAddrMode)
6246 {
6247 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
6248 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
6249 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
6250 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6251 }
6252 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
6253 false /* fRep */, false /* fStrIo */, cbInstr);
6254 if (rcStrict == VINF_SVM_VMEXIT)
6255 return VINF_SUCCESS;
6256 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6257 {
6258 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6259 VBOXSTRICTRC_VAL(rcStrict)));
6260 return rcStrict;
6261 }
6262 }
6263#endif
6264
6265 /*
6266 * Perform the I/O.
6267 */
6268 uint32_t u32Value;
6269 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6270 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg);
6271 else
6272 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, cbReg);
6273 if (IOM_SUCCESS(rcStrict))
6274 {
6275 switch (cbReg)
6276 {
6277 case 1: pCtx->al = (uint8_t)u32Value; break;
6278 case 2: pCtx->ax = (uint16_t)u32Value; break;
6279 case 4: pCtx->rax = u32Value; break;
6280 default: AssertFailedReturn(VERR_IEM_IPE_3);
6281 }
6282 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6283 pVCpu->iem.s.cPotentialExits++;
6284 if (rcStrict != VINF_SUCCESS)
6285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6286 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6287
6288 /*
6289 * Check for I/O breakpoints.
6290 */
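            /* Architectural I/O breakpoints (DR7 R/W field = I/O) are only active when
               CR4.DE is set; the DBGF check covers breakpoints armed by the VMM debugger. */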
6291 uint32_t const uDr7 = pCtx->dr[7];
6292 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6293 && X86_DR7_ANY_RW_IO(uDr7)
6294 && (pCtx->cr4 & X86_CR4_DE))
6295 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6296 {
6297 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6298 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6299 rcStrict = iemRaiseDebugException(pVCpu);
6300 }
6301 }
6302
6303 return rcStrict;
6304}
6305
6306
6307/**
6308 * Implements 'IN eAX, DX'.
6309 *
6310 * @param cbReg The register size.
6311 */
6312IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
6313{
6314 return IEM_CIMPL_CALL_2(iemCImpl_in, IEM_GET_CTX(pVCpu)->dx, cbReg);
6315}
6316
6317
6318/**
6319 * Implements 'OUT port, eAX'.
6320 *
6321 * @param u16Port The destination port.
6322 * @param cbReg The register size.
6323 */
6324IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
6325{
6326 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6327
6328 /*
6329 * CPL check
6330 */
6331 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg);
6332 if (rcStrict != VINF_SUCCESS)
6333 return rcStrict;
6334
6335 /*
6336 * Check SVM nested-guest IO intercept.
6337 */
6338#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6339 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6340 {
6341 uint8_t cAddrSizeBits;
6342 switch (pVCpu->iem.s.enmEffAddrMode)
6343 {
6344 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
6345 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
6346 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
6347 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6348 }
6349 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
6350 false /* fRep */, false /* fStrIo */, cbInstr);
6351 if (rcStrict == VINF_SVM_VMEXIT)
6352 return VINF_SUCCESS;
6353 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
6354 {
6355 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6356 VBOXSTRICTRC_VAL(rcStrict)));
6357 return rcStrict;
6358 }
6359 }
6360#endif
6361
6362 /*
6363 * Perform the I/O.
6364 */
6365 uint32_t u32Value;
6366 switch (cbReg)
6367 {
6368 case 1: u32Value = pCtx->al; break;
6369 case 2: u32Value = pCtx->ax; break;
6370 case 4: u32Value = pCtx->eax; break;
6371 default: AssertFailedReturn(VERR_IEM_IPE_4);
6372 }
6373 if (!IEM_VERIFICATION_ENABLED(pVCpu))
6374 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg);
6375 else
6376 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, u32Value, cbReg);
6377 if (IOM_SUCCESS(rcStrict))
6378 {
6379 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6380 pVCpu->iem.s.cPotentialExits++;
6381 if (rcStrict != VINF_SUCCESS)
6382 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6383 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6384
6385 /*
6386 * Check for I/O breakpoints.
6387 */
6388 uint32_t const uDr7 = pCtx->dr[7];
6389 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6390 && X86_DR7_ANY_RW_IO(uDr7)
6391 && (pCtx->cr4 & X86_CR4_DE))
6392 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6393 {
6394 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
6395 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6396 rcStrict = iemRaiseDebugException(pVCpu);
6397 }
6398 }
6399 return rcStrict;
6400}
6401
6402
6403/**
6404 * Implements 'OUT DX, eAX'.
6405 *
6406 * @param cbReg The register size.
6407 */
6408IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
6409{
6410 return IEM_CIMPL_CALL_2(iemCImpl_out, IEM_GET_CTX(pVCpu)->dx, cbReg);
6411}
6412
6413
6414/**
6415 * Implements 'CLI'.
6416 */
6417IEM_CIMPL_DEF_0(iemCImpl_cli)
6418{
6419 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6420 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6421 uint32_t const fEflOld = fEfl;
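    /* Protected mode: CPL <= IOPL clears IF; CPL 3 with CR4.PVI clears VIF instead.
       V8086 mode: IOPL 3 clears IF; IOPL < 3 with CR4.VME clears VIF. Any other
       combination raises #GP(0). Real mode always clears IF. */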
6422 if (pCtx->cr0 & X86_CR0_PE)
6423 {
6424 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6425 if (!(fEfl & X86_EFL_VM))
6426 {
6427 if (pVCpu->iem.s.uCpl <= uIopl)
6428 fEfl &= ~X86_EFL_IF;
6429 else if ( pVCpu->iem.s.uCpl == 3
6430 && (pCtx->cr4 & X86_CR4_PVI) )
6431 fEfl &= ~X86_EFL_VIF;
6432 else
6433 return iemRaiseGeneralProtectionFault0(pVCpu);
6434 }
6435 /* V8086 */
6436 else if (uIopl == 3)
6437 fEfl &= ~X86_EFL_IF;
6438 else if ( uIopl < 3
6439 && (pCtx->cr4 & X86_CR4_VME) )
6440 fEfl &= ~X86_EFL_VIF;
6441 else
6442 return iemRaiseGeneralProtectionFault0(pVCpu);
6443 }
6444 /* real mode */
6445 else
6446 fEfl &= ~X86_EFL_IF;
6447
6448 /* Commit. */
6449 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6450 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6451 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
6452 return VINF_SUCCESS;
6453}
6454
6455
6456/**
6457 * Implements 'STI'.
6458 */
6459IEM_CIMPL_DEF_0(iemCImpl_sti)
6460{
6461 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6462 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
6463 uint32_t const fEflOld = fEfl;
6464
6465 if (pCtx->cr0 & X86_CR0_PE)
6466 {
6467 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
6468 if (!(fEfl & X86_EFL_VM))
6469 {
6470 if (pVCpu->iem.s.uCpl <= uIopl)
6471 fEfl |= X86_EFL_IF;
6472 else if ( pVCpu->iem.s.uCpl == 3
6473 && (pCtx->cr4 & X86_CR4_PVI)
6474 && !(fEfl & X86_EFL_VIP) )
6475 fEfl |= X86_EFL_VIF;
6476 else
6477 return iemRaiseGeneralProtectionFault0(pVCpu);
6478 }
6479 /* V8086 */
6480 else if (uIopl == 3)
6481 fEfl |= X86_EFL_IF;
6482 else if ( uIopl < 3
6483 && (pCtx->cr4 & X86_CR4_VME)
6484 && !(fEfl & X86_EFL_VIP) )
6485 fEfl |= X86_EFL_VIF;
6486 else
6487 return iemRaiseGeneralProtectionFault0(pVCpu);
6488 }
6489 /* real mode */
6490 else
6491 fEfl |= X86_EFL_IF;
6492
6493 /* Commit. */
6494 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
6495 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
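    /* When STI actually sets IF, interrupt delivery stays inhibited until after the
       next instruction has executed (the STI interrupt shadow). */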
6496 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
6497 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6498 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
6499 return VINF_SUCCESS;
6500}
6501
6502
6503/**
6504 * Implements 'HLT'.
6505 */
6506IEM_CIMPL_DEF_0(iemCImpl_hlt)
6507{
6508 if (pVCpu->iem.s.uCpl != 0)
6509 return iemRaiseGeneralProtectionFault0(pVCpu);
6510
6511 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
6512 {
6513 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
6514 IEM_SVM_UPDATE_NRIP(pVCpu);
6515 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6516 }
6517
6518 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6519 return VINF_EM_HALT;
6520}
6521
6522
6523/**
6524 * Implements 'MONITOR'.
6525 */
6526IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
6527{
6528 /*
6529 * Permission checks.
6530 */
6531 if (pVCpu->iem.s.uCpl != 0)
6532 {
6533 Log2(("monitor: CPL != 0\n"));
6534 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
6535 }
6536 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6537 {
6538 Log2(("monitor: Not in CPUID\n"));
6539 return iemRaiseUndefinedOpcode(pVCpu);
6540 }
6541
6542 /*
6543 * Gather the operands and validate them.
6544 */
6545 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6546 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
6547 uint32_t uEcx = pCtx->ecx;
6548 uint32_t uEdx = pCtx->edx;
6549/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
6550 * \#GP first. */
6551 if (uEcx != 0)
6552 {
6553 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
6554 return iemRaiseGeneralProtectionFault0(pVCpu);
6555 }
6556
6557 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
6558 if (rcStrict != VINF_SUCCESS)
6559 return rcStrict;
6560
6561 RTGCPHYS GCPhysMem;
6562 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
6563 if (rcStrict != VINF_SUCCESS)
6564 return rcStrict;
6565
6566 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
6567 {
6568 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
6569 IEM_SVM_UPDATE_NRIP(pVCpu);
6570 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6571 }
6572
6573 /*
6574 * Call EM to prepare the monitor/wait.
6575 */
6576 rcStrict = EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
6577 Assert(rcStrict == VINF_SUCCESS);
6578
6579 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6580 return rcStrict;
6581}
6582
6583
6584/**
6585 * Implements 'MWAIT'.
6586 */
6587IEM_CIMPL_DEF_0(iemCImpl_mwait)
6588{
6589 /*
6590 * Permission checks.
6591 */
6592 if (pVCpu->iem.s.uCpl != 0)
6593 {
6594 Log2(("mwait: CPL != 0\n"));
6595 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
6596 * EFLAGS.VM then.) */
6597 return iemRaiseUndefinedOpcode(pVCpu);
6598 }
6599 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
6600 {
6601 Log2(("mwait: Not in CPUID\n"));
6602 return iemRaiseUndefinedOpcode(pVCpu);
6603 }
6604
6605 /*
6606 * Gather the operands and validate them.
6607 */
6608 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6609 uint32_t uEax = pCtx->eax;
6610 uint32_t uEcx = pCtx->ecx;
6611 if (uEcx != 0)
6612 {
6613 /* Only supported extension is break on IRQ when IF=0. */
6614 if (uEcx > 1)
6615 {
6616 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
6617 return iemRaiseGeneralProtectionFault0(pVCpu);
6618 }
6619 uint32_t fMWaitFeatures = 0;
6620 uint32_t uIgnore = 0;
6621 CPUMGetGuestCpuId(pVCpu, 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
6622 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6623 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
6624 {
6625 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
6626 return iemRaiseGeneralProtectionFault0(pVCpu);
6627 }
6628 }
6629
6630 /*
6631 * Check SVM nested-guest mwait intercepts.
6632 */
6633 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
6634 && EMMonitorIsArmed(pVCpu))
6635 {
6636 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
6637 IEM_SVM_UPDATE_NRIP(pVCpu);
6638 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6639 }
6640 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
6641 {
6642 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
6643 IEM_SVM_UPDATE_NRIP(pVCpu);
6644 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6645 }
6646
6647 /*
6648 * Call EM to prepare the monitor/wait.
6649 */
6650 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
6651
6652 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6653 return rcStrict;
6654}
6655
6656
6657/**
6658 * Implements 'SWAPGS'.
6659 */
6660IEM_CIMPL_DEF_0(iemCImpl_swapgs)
6661{
6662 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
6663
6664 /*
6665 * Permission checks.
6666 */
6667 if (pVCpu->iem.s.uCpl != 0)
6668 {
6669 Log2(("swapgs: CPL != 0\n"));
6670 return iemRaiseUndefinedOpcode(pVCpu);
6671 }
6672
6673 /*
6674 * Do the job.
6675 */
6676 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6677 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
6678 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
6679 pCtx->gs.u64Base = uOtherGsBase;
6680
6681 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6682 return VINF_SUCCESS;
6683}
6684
6685
6686/**
6687 * Implements 'CPUID'.
6688 */
6689IEM_CIMPL_DEF_0(iemCImpl_cpuid)
6690{
6691 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6692
6693 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
6694 {
6695 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
6696 IEM_SVM_UPDATE_NRIP(pVCpu);
6697 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6698 }
6699
6700 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
6701 pCtx->rax &= UINT32_C(0xffffffff);
6702 pCtx->rbx &= UINT32_C(0xffffffff);
6703 pCtx->rcx &= UINT32_C(0xffffffff);
6704 pCtx->rdx &= UINT32_C(0xffffffff);
6705
6706 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6707 return VINF_SUCCESS;
6708}
6709
6710
6711/**
6712 * Implements 'AAD'.
6713 *
6714 * @param bImm The immediate operand.
6715 */
6716IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
6717{
6718 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6719
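    /* AAD: AL = AL + AH * imm8, AH = 0 (assigning the 8-bit result to AX clears AH). */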
6720 uint16_t const ax = pCtx->ax;
6721 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
6722 pCtx->ax = al;
6723 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6724 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6725 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6726
6727 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6728 return VINF_SUCCESS;
6729}
6730
6731
6732/**
6733 * Implements 'AAM'.
6734 *
6735 * @param bImm The immediate operand. Cannot be 0.
6736 */
6737IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
6738{
6739 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6740 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
6741
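    /* AAM: AH = AL / imm8, AL = AL % imm8. */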
6742 uint16_t const ax = pCtx->ax;
6743 uint8_t const al = (uint8_t)ax % bImm;
6744 uint8_t const ah = (uint8_t)ax / bImm;
6745 pCtx->ax = (ah << 8) + al;
6746 iemHlpUpdateArithEFlagsU8(pVCpu, al,
6747 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
6748 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
6749
6750 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6751 return VINF_SUCCESS;
6752}
6753
6754
6755/**
6756 * Implements 'DAA'.
6757 */
6758IEM_CIMPL_DEF_0(iemCImpl_daa)
6759{
6760 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6761
6762 uint8_t const al = pCtx->al;
6763 bool const fCarry = pCtx->eflags.Bits.u1CF;
6764
6765 if ( pCtx->eflags.Bits.u1AF
6766 || (al & 0xf) >= 10)
6767 {
6768 pCtx->al = al + 6;
6769 pCtx->eflags.Bits.u1AF = 1;
6770 }
6771 else
6772 pCtx->eflags.Bits.u1AF = 0;
6773
6774 if (al >= 0x9a || fCarry)
6775 {
6776 pCtx->al += 0x60;
6777 pCtx->eflags.Bits.u1CF = 1;
6778 }
6779 else
6780 pCtx->eflags.Bits.u1CF = 0;
6781
6782 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6783 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6784 return VINF_SUCCESS;
6785}
6786
6787
6788/**
6789 * Implements 'DAS'.
6790 */
6791IEM_CIMPL_DEF_0(iemCImpl_das)
6792{
6793 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6794
6795 uint8_t const uInputAL = pCtx->al;
6796 bool const fCarry = pCtx->eflags.Bits.u1CF;
6797
6798 if ( pCtx->eflags.Bits.u1AF
6799 || (uInputAL & 0xf) >= 10)
6800 {
6801 pCtx->eflags.Bits.u1AF = 1;
6802 if (uInputAL < 6)
6803 pCtx->eflags.Bits.u1CF = 1;
6804 pCtx->al = uInputAL - 6;
6805 }
6806 else
6807 {
6808 pCtx->eflags.Bits.u1AF = 0;
6809 pCtx->eflags.Bits.u1CF = 0;
6810 }
6811
6812 if (uInputAL >= 0x9a || fCarry)
6813 {
6814 pCtx->al -= 0x60;
6815 pCtx->eflags.Bits.u1CF = 1;
6816 }
6817
6818 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6819 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6820 return VINF_SUCCESS;
6821}
6822
6823
6824/**
6825 * Implements 'AAA'.
6826 */
6827IEM_CIMPL_DEF_0(iemCImpl_aaa)
6828{
6829 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6830
6831 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6832 {
6833 if ( pCtx->eflags.Bits.u1AF
6834 || (pCtx->ax & 0xf) >= 10)
6835 {
6836 iemAImpl_add_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6837 pCtx->eflags.Bits.u1AF = 1;
6838 pCtx->eflags.Bits.u1CF = 1;
6839#ifdef IEM_VERIFICATION_MODE_FULL
6840 pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6841#endif
6842 }
6843 else
6844 {
6845 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6846 pCtx->eflags.Bits.u1AF = 0;
6847 pCtx->eflags.Bits.u1CF = 0;
6848 }
6849 pCtx->ax &= UINT16_C(0xff0f);
6850 }
6851 else
6852 {
6853 if ( pCtx->eflags.Bits.u1AF
6854 || (pCtx->ax & 0xf) >= 10)
6855 {
6856 pCtx->ax += UINT16_C(0x106);
6857 pCtx->eflags.Bits.u1AF = 1;
6858 pCtx->eflags.Bits.u1CF = 1;
6859 }
6860 else
6861 {
6862 pCtx->eflags.Bits.u1AF = 0;
6863 pCtx->eflags.Bits.u1CF = 0;
6864 }
6865 pCtx->ax &= UINT16_C(0xff0f);
6866 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6867 }
6868
6869 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6870 return VINF_SUCCESS;
6871}
6872
6873
6874/**
6875 * Implements 'AAS'.
6876 */
6877IEM_CIMPL_DEF_0(iemCImpl_aas)
6878{
6879 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6880
6881 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
6882 {
6883 if ( pCtx->eflags.Bits.u1AF
6884 || (pCtx->ax & 0xf) >= 10)
6885 {
6886 iemAImpl_sub_u16(&pCtx->ax, 0x106, &pCtx->eflags.u32);
6887 pCtx->eflags.Bits.u1AF = 1;
6888 pCtx->eflags.Bits.u1CF = 1;
6889#ifdef IEM_VERIFICATION_MODE_FULL
6890 pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_OF;
6891#endif
6892 }
6893 else
6894 {
6895 iemHlpUpdateArithEFlagsU16(pVCpu, pCtx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6896 pCtx->eflags.Bits.u1AF = 0;
6897 pCtx->eflags.Bits.u1CF = 0;
6898 }
6899 pCtx->ax &= UINT16_C(0xff0f);
6900 }
6901 else
6902 {
6903 if ( pCtx->eflags.Bits.u1AF
6904 || (pCtx->ax & 0xf) >= 10)
6905 {
6906 pCtx->ax -= UINT16_C(0x106);
6907 pCtx->eflags.Bits.u1AF = 1;
6908 pCtx->eflags.Bits.u1CF = 1;
6909 }
6910 else
6911 {
6912 pCtx->eflags.Bits.u1AF = 0;
6913 pCtx->eflags.Bits.u1CF = 0;
6914 }
6915 pCtx->ax &= UINT16_C(0xff0f);
6916 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
6917 }
6918
6919 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6920 return VINF_SUCCESS;
6921}
6922
6923
6924/**
6925 * Implements the 16-bit version of 'BOUND'.
6926 *
6927 * @note We have separate 16-bit and 32-bit variants of this function due to
6928 * the decoder using unsigned parameters, whereas we want signed ones to
6929 * do the job. This is significant for a recompiler.
6930 */
6931IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
6932{
6933 /*
6934 * Check if the index is inside the bounds, otherwise raise #BR.
6935 */
6936 if ( idxArray >= idxLowerBound
6937 && idxArray <= idxUpperBound)
6938 {
6939 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6940 return VINF_SUCCESS;
6941 }
6942
6943 return iemRaiseBoundRangeExceeded(pVCpu);
6944}
6945
6946
6947/**
6948 * Implements the 32-bit version of 'BOUND'.
6949 */
6950IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
6951{
6952 /*
6953 * Check if the index is inside the bounds, otherwise raise #BR.
6954 */
6955 if ( idxArray >= idxLowerBound
6956 && idxArray <= idxUpperBound)
6957 {
6958 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6959 return VINF_SUCCESS;
6960 }
6961
6962 return iemRaiseBoundRangeExceeded(pVCpu);
6963}
6964
6965
6966
6967/*
6968 * Instantiate the various string operation combinations.
6969 */
6970#define OP_SIZE 8
6971#define ADDR_SIZE 16
6972#include "IEMAllCImplStrInstr.cpp.h"
6973#define OP_SIZE 8
6974#define ADDR_SIZE 32
6975#include "IEMAllCImplStrInstr.cpp.h"
6976#define OP_SIZE 8
6977#define ADDR_SIZE 64
6978#include "IEMAllCImplStrInstr.cpp.h"
6979
6980#define OP_SIZE 16
6981#define ADDR_SIZE 16
6982#include "IEMAllCImplStrInstr.cpp.h"
6983#define OP_SIZE 16
6984#define ADDR_SIZE 32
6985#include "IEMAllCImplStrInstr.cpp.h"
6986#define OP_SIZE 16
6987#define ADDR_SIZE 64
6988#include "IEMAllCImplStrInstr.cpp.h"
6989
6990#define OP_SIZE 32
6991#define ADDR_SIZE 16
6992#include "IEMAllCImplStrInstr.cpp.h"
6993#define OP_SIZE 32
6994#define ADDR_SIZE 32
6995#include "IEMAllCImplStrInstr.cpp.h"
6996#define OP_SIZE 32
6997#define ADDR_SIZE 64
6998#include "IEMAllCImplStrInstr.cpp.h"
6999
7000#define OP_SIZE 64
7001#define ADDR_SIZE 32
7002#include "IEMAllCImplStrInstr.cpp.h"
7003#define OP_SIZE 64
7004#define ADDR_SIZE 64
7005#include "IEMAllCImplStrInstr.cpp.h"
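
/*
 * Editorial sketch (not part of the original file): the instantiation pattern
 * above in miniature.  Each OP_SIZE/ADDR_SIZE pair pastes a specialised worker
 * out of the template include, which presumably #undefs the macros so the next
 * block can redefine them.  The names below (mytempl.h, myStrOp_*, MY_NAME) are
 * made up for illustration only.
 */
#if 0 /* illustration only */
/* mytempl.h */
# define MY_NAME_(a_OpSize, a_AddrSize)  myStrOp_ ## a_OpSize ## _ ## a_AddrSize
# define MY_NAME(a_OpSize, a_AddrSize)   MY_NAME_(a_OpSize, a_AddrSize)
static void MY_NAME(OP_SIZE, ADDR_SIZE)(void)
{
    /* code specialised on OP_SIZE-bit operands and ADDR_SIZE-bit addressing */
}
# undef MY_NAME_
# undef MY_NAME
# undef OP_SIZE
# undef ADDR_SIZE

/* consumer */
# define OP_SIZE   8
# define ADDR_SIZE 16
# include "mytempl.h"   /* emits myStrOp_8_16() */
#endif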
7006
7007
7008/**
7009 * Implements 'XGETBV'.
7010 */
7011IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
7012{
7013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7014 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7015 {
7016 uint32_t uEcx = pCtx->ecx;
7017 switch (uEcx)
7018 {
7019 case 0:
7020 break;
7021
7022 case 1: /** @todo Implement XCR1 support. */
7023 default:
7024 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
7025 return iemRaiseGeneralProtectionFault0(pVCpu);
7026
7027 }
7028 pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
7029 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
7030
7031 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7032 return VINF_SUCCESS;
7033 }
7034 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
7035 return iemRaiseUndefinedOpcode(pVCpu);
7036}
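
/*
 * Editorial sketch (not part of the original file): XGETBV, as implemented
 * above, hands the selected XCR back in EDX:EAX.  A caller reassembles the
 * 64-bit value like this; xcrFromEdxEax is an illustrative name, assumes
 * <stdint.h>, and is simply the inverse of the RT_LO_U32/RT_HI_U32 split.
 */
#if 0 /* illustration only */
static uint64_t xcrFromEdxEax(uint32_t uEdx, uint32_t uEax)
{
    return ((uint64_t)uEdx << 32) | uEax;
}
#endif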
7037
7038
7039/**
7040 * Implements 'XSETBV'.
7041 */
7042IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
7043{
7044 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7045 if (pCtx->cr4 & X86_CR4_OSXSAVE)
7046 {
7047 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
7048 {
7049 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
7050 IEM_SVM_UPDATE_NRIP(pVCpu);
7051 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7052 }
7053
7054 if (pVCpu->iem.s.uCpl == 0)
7055 {
7056 uint32_t uEcx = pCtx->ecx;
7057 uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
7058 switch (uEcx)
7059 {
7060 case 0:
7061 {
7062 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
7063 if (rc == VINF_SUCCESS)
7064 break;
7065 Assert(rc == VERR_CPUM_RAISE_GP_0);
7066 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7067 return iemRaiseGeneralProtectionFault0(pVCpu);
7068 }
7069
7070 case 1: /** @todo Implement XCR1 support. */
7071 default:
7072 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7073 return iemRaiseGeneralProtectionFault0(pVCpu);
7074
7075 }
7076
7077 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7078 return VINF_SUCCESS;
7079 }
7080
7081 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
7082 return iemRaiseGeneralProtectionFault0(pVCpu);
7083 }
7084 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
7085 return iemRaiseUndefinedOpcode(pVCpu);
7086}
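
/*
 * Editorial sketch (not part of the original file): the architectural checks a
 * new XCR0 value must pass (Intel SDM, XSETBV) and which CPUMSetGuestXcr0 is
 * expected to enforce before the #GP(0) path above is taken.  This sketches the
 * rules, it is not the actual CPUM code; xcr0LooksValid is an illustrative name
 * and fSupportedMask stands for the supported-feature mask.
 */
#if 0 /* illustration only */
static bool xcr0LooksValid(uint64_t uNewXcr0, uint64_t fSupportedMask)
{
    if (!(uNewXcr0 & UINT64_C(1)))                              /* bit 0, x87: must stay set */
        return false;
    if ((uNewXcr0 & UINT64_C(4)) && !(uNewXcr0 & UINT64_C(2)))  /* bit 2 YMM requires bit 1 SSE */
        return false;
    return !(uNewXcr0 & ~fSupportedMask);                       /* no unsupported/reserved bits */
}
#endif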
7087
7088#ifdef IN_RING3
7089
7090/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
7091struct IEMCIMPLCX16ARGS
7092{
7093 PRTUINT128U pu128Dst;
7094 PRTUINT128U pu128RaxRdx;
7095 PRTUINT128U pu128RbxRcx;
7096 uint32_t *pEFlags;
7097# ifdef VBOX_STRICT
7098 uint32_t cCalls;
7099# endif
7100};
7101
7102/**
7103 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
7104 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
7105 */
7106static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPU pVCpu, void *pvUser)
7107{
7108 RT_NOREF(pVM, pVCpu);
7109 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
7110# ifdef VBOX_STRICT
7111 Assert(pArgs->cCalls == 0);
7112 pArgs->cCalls++;
7113# endif
7114
7115 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
7116 return VINF_SUCCESS;
7117}
7118
7119#endif /* IN_RING3 */
7120
7121/**
7122 * Implements 'CMPXCHG16B' fallback using rendezvous.
7123 */
7124IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
7125 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
7126{
7127#ifdef IN_RING3
7128 struct IEMCIMPLCX16ARGS Args;
7129 Args.pu128Dst = pu128Dst;
7130 Args.pu128RaxRdx = pu128RaxRdx;
7131 Args.pu128RbxRcx = pu128RbxRcx;
7132 Args.pEFlags = pEFlags;
7133# ifdef VBOX_STRICT
7134 Args.cCalls = 0;
7135# endif
7136 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
7137 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
7138 Assert(Args.cCalls == 1);
7139 if (rcStrict == VINF_SUCCESS)
7140 {
7141 /* Duplicated tail code. */
7142 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
7143 if (rcStrict == VINF_SUCCESS)
7144 {
7145 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
7146 pCtx->eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
7147 if (!(*pEFlags & X86_EFL_ZF))
7148 {
7149 pCtx->rax = pu128RaxRdx->s.Lo;
7150 pCtx->rdx = pu128RaxRdx->s.Hi;
7151 }
7152 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7153 }
7154 }
7155 return rcStrict;
7156#else
7157 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
7158 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
7159#endif
7160}
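
/*
 * Editorial sketch (not part of the original file): what the rendezvous above
 * buys us.  All EMTs are gathered and the callback runs once, so a plain,
 * non-atomic 16-byte compare-and-swap cannot race with other virtual CPUs.
 * MY128 and myCmpXchg16bFallback are illustrative names; the real
 * iemAImpl_cmpxchg16b_fallback additionally updates ZF via the EFLAGS pointer.
 */
#if 0 /* illustration only */
typedef struct MY128 { uint64_t Lo, Hi; } MY128;
static bool myCmpXchg16bFallback(MY128 *pDst, MY128 *pRaxRdx, const MY128 *pRbxRcx)
{
    if (pDst->Lo == pRaxRdx->Lo && pDst->Hi == pRaxRdx->Hi)
    {
        *pDst = *pRbxRcx;   /* equal: store RBX:RCX, ZF would be set */
        return true;
    }
    *pRaxRdx = *pDst;       /* not equal: load the current value, ZF would be cleared */
    return false;
}
#endif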
7161
7162
7163/**
7164 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
7165 *
7166 * This is implemented in C because it triggers a load-like behaviour without
7167 * actually reading anything. Since that's not so common, it's implemented
7168 * here.
7169 *
7170 * @param iEffSeg The effective segment.
7171 * @param GCPtrEff The address of the image.
7172 */
7173IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7174{
7175 /*
7176 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
7177 */
7178 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
7179 if (rcStrict == VINF_SUCCESS)
7180 {
7181 RTGCPHYS GCPhysMem;
7182 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7183 if (rcStrict == VINF_SUCCESS)
7184 {
7185 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7186 return VINF_SUCCESS;
7187 }
7188 }
7189
7190 return rcStrict;
7191}
7192
7193
7194/**
7195 * Implements 'FINIT' and 'FNINIT'.
7196 *
7197 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
7198 * not.
7199 */
7200IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
7201{
7202 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7203
7204 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
7205 return iemRaiseDeviceNotAvailable(pVCpu);
7206
7207 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
7208 if (fCheckXcpts && TODO )
7209 return iemRaiseMathFault(pVCpu);
7210 */
7211
7212 PX86XSAVEAREA pXState = pCtx->CTX_SUFF(pXState);
7213 pXState->x87.FCW = 0x37f;
7214 pXState->x87.FSW = 0;
7215 pXState->x87.FTW = 0x00; /* 0 - empty. */
7216 pXState->x87.FPUDP = 0;
7217 pXState->x87.DS = 0; //??
7218 pXState->x87.Rsrvd2= 0;
7219 pXState->x87.FPUIP = 0;
7220 pXState->x87.CS = 0; //??
7221 pXState->x87.Rsrvd1= 0;
7222 pXState->x87.FOP = 0;
7223
7224 iemHlpUsedFpu(pVCpu);
7225 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7226 return VINF_SUCCESS;
7227}
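
/*
 * Editorial note (not part of the original file): the FNINIT reset values used
 * above decode as follows.  FCW=0x37f: exception masks IM..PM (bits 0-5) all
 * set, reserved bit 6 set, PC=11b (64-bit significand precision, bits 8-9),
 * RC=00b (round to nearest, bits 10-11).  FSW=0: TOP=0, no pending exceptions,
 * condition codes cleared.  FTW=0 in the abridged form: all eight registers
 * tagged empty.
 */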
7228
7229
7230/**
7231 * Implements 'FXSAVE'.
7232 *
7233 * @param iEffSeg The effective segment.
7234 * @param GCPtrEff The address of the image.
7235 * @param enmEffOpSize The operand size (only REX.W really matters).
7236 */
7237IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7238{
7239 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7240
7241 /*
7242 * Raise exceptions.
7243 */
7244 if (pCtx->cr0 & X86_CR0_EM)
7245 return iemRaiseUndefinedOpcode(pVCpu);
7246 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7247 return iemRaiseDeviceNotAvailable(pVCpu);
7248 if (GCPtrEff & 15)
7249 {
7250 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7251 * all/any misalignment sizes, Intel says it's an implementation detail. */
7252 if ( (pCtx->cr0 & X86_CR0_AM)
7253 && pCtx->eflags.Bits.u1AC
7254 && pVCpu->iem.s.uCpl == 3)
7255 return iemRaiseAlignmentCheckException(pVCpu);
7256 return iemRaiseGeneralProtectionFault0(pVCpu);
7257 }
7258
7259 /*
7260 * Access the memory.
7261 */
7262 void *pvMem512;
7263 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7264 if (rcStrict != VINF_SUCCESS)
7265 return rcStrict;
7266 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7267 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7268
7269 /*
7270 * Store the registers.
7271 */
7272 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
7273 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
7274
7275 /* common for all formats */
7276 pDst->FCW = pSrc->FCW;
7277 pDst->FSW = pSrc->FSW;
7278 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7279 pDst->FOP = pSrc->FOP;
7280 pDst->MXCSR = pSrc->MXCSR;
7281 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7282 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7283 {
7284 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7285 * them for now... */
7286 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7287 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7288 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7289 pDst->aRegs[i].au32[3] = 0;
7290 }
7291
7292 /* FPU IP, CS, DP and DS. */
7293 pDst->FPUIP = pSrc->FPUIP;
7294 pDst->CS = pSrc->CS;
7295 pDst->FPUDP = pSrc->FPUDP;
7296 pDst->DS = pSrc->DS;
7297 if (enmEffOpSize == IEMMODE_64BIT)
7298 {
7299 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7300 pDst->Rsrvd1 = pSrc->Rsrvd1;
7301 pDst->Rsrvd2 = pSrc->Rsrvd2;
7302 pDst->au32RsrvdForSoftware[0] = 0;
7303 }
7304 else
7305 {
7306 pDst->Rsrvd1 = 0;
7307 pDst->Rsrvd2 = 0;
7308 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7309 }
7310
7311 /* XMM registers. */
7312 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7313 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7314 || pVCpu->iem.s.uCpl != 0)
7315 {
7316 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7317 for (uint32_t i = 0; i < cXmmRegs; i++)
7318 pDst->aXMM[i] = pSrc->aXMM[i];
7319 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7320 * right? */
7321 }
7322
7323 /*
7324 * Commit the memory.
7325 */
7326 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7327 if (rcStrict != VINF_SUCCESS)
7328 return rcStrict;
7329
7330 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7331 return VINF_SUCCESS;
7332}
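
/*
 * Editorial sketch (not part of the original file): the XMM block above is
 * skipped exactly when AMD's fast FXSAVE/FXRSTOR applies, i.e. EFER.FFXSR set
 * while executing in 64-bit mode at CPL 0, which is what the three-part guard
 * encodes.  fxsaveSkipsXmm is an illustrative name; assumes <stdbool.h>.
 */
#if 0 /* illustration only */
static bool fxsaveSkipsXmm(uint64_t uEfer, bool fIn64BitMode, uint8_t uCpl)
{
    return (uEfer & MSR_K6_EFER_FFXSR) && fIn64BitMode && uCpl == 0;
}
#endif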
7333
7334
7335/**
7336 * Implements 'FXRSTOR'.
7337 *
7338 * @param GCPtrEff The address of the image.
7339 * @param enmEffOpSize The operand size (only REX.W really matters).
7340 */
7341IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7342{
7343 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7344
7345 /*
7346 * Raise exceptions.
7347 */
7348 if (pCtx->cr0 & X86_CR0_EM)
7349 return iemRaiseUndefinedOpcode(pVCpu);
7350 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
7351 return iemRaiseDeviceNotAvailable(pVCpu);
7352 if (GCPtrEff & 15)
7353 {
7354 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7355 * all/any misalignment sizes, Intel says it's an implementation detail. */
7356 if ( (pCtx->cr0 & X86_CR0_AM)
7357 && pCtx->eflags.Bits.u1AC
7358 && pVCpu->iem.s.uCpl == 3)
7359 return iemRaiseAlignmentCheckException(pVCpu);
7360 return iemRaiseGeneralProtectionFault0(pVCpu);
7361 }
7362
7363 /*
7364 * Access the memory.
7365 */
7366 void *pvMem512;
7367 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7368 if (rcStrict != VINF_SUCCESS)
7369 return rcStrict;
7370 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7371 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7372
7373 /*
7374 * Check the state for stuff which will #GP(0).
7375 */
7376 uint32_t const fMXCSR = pSrc->MXCSR;
7377 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7378 if (fMXCSR & ~fMXCSR_MASK)
7379 {
7380 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
7381 return iemRaiseGeneralProtectionFault0(pVCpu);
7382 }
7383
7384 /*
7385 * Load the registers.
7386 */
7387 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
7388 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
7389
7390 /* common for all formats */
7391 pDst->FCW = pSrc->FCW;
7392 pDst->FSW = pSrc->FSW;
7393 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7394 pDst->FOP = pSrc->FOP;
7395 pDst->MXCSR = fMXCSR;
7396 /* (MXCSR_MASK is read-only) */
7397 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7398 {
7399 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7400 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7401 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7402 pDst->aRegs[i].au32[3] = 0;
7403 }
7404
7405 /* FPU IP, CS, DP and DS. */
7406 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7407 {
7408 pDst->FPUIP = pSrc->FPUIP;
7409 pDst->CS = pSrc->CS;
7410 pDst->Rsrvd1 = pSrc->Rsrvd1;
7411 pDst->FPUDP = pSrc->FPUDP;
7412 pDst->DS = pSrc->DS;
7413 pDst->Rsrvd2 = pSrc->Rsrvd2;
7414 }
7415 else
7416 {
7417 pDst->FPUIP = pSrc->FPUIP;
7418 pDst->CS = pSrc->CS;
7419 pDst->Rsrvd1 = 0;
7420 pDst->FPUDP = pSrc->FPUDP;
7421 pDst->DS = pSrc->DS;
7422 pDst->Rsrvd2 = 0;
7423 }
7424
7425 /* XMM registers. */
7426 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
7427 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7428 || pVCpu->iem.s.uCpl != 0)
7429 {
7430 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7431 for (uint32_t i = 0; i < cXmmRegs; i++)
7432 pDst->aXMM[i] = pSrc->aXMM[i];
7433 }
7434
7435 /*
7436 * Commit the memory.
7437 */
7438 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7439 if (rcStrict != VINF_SUCCESS)
7440 return rcStrict;
7441
7442 iemHlpUsedFpu(pVCpu);
7443 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7444 return VINF_SUCCESS;
7445}
7446
7447
7448/**
7449 * Implements 'XSAVE'.
7450 *
7451 * @param iEffSeg The effective segment.
7452 * @param GCPtrEff The address of the image.
7453 * @param enmEffOpSize The operand size (only REX.W really matters).
7454 */
7455IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7456{
7457 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7458
7459 /*
7460 * Raise exceptions.
7461 */
7462 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7463 return iemRaiseUndefinedOpcode(pVCpu);
7464 if (pCtx->cr0 & X86_CR0_TS)
7465 return iemRaiseDeviceNotAvailable(pVCpu);
7466 if (GCPtrEff & 63)
7467 {
7468 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7469 * all/any misalignment sizes, Intel says it's an implementation detail. */
7470 if ( (pCtx->cr0 & X86_CR0_AM)
7471 && pCtx->eflags.Bits.u1AC
7472 && pVCpu->iem.s.uCpl == 3)
7473 return iemRaiseAlignmentCheckException(pVCpu);
7474 return iemRaiseGeneralProtectionFault0(pVCpu);
7475 }
7476
7477 /*
7478 * Calc the requested mask
7479 */
7480 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7481 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7482 uint64_t const fXInUse = pCtx->aXcr[0];
7483
7484/** @todo figure out the exact protocol for the memory access. Currently we
7485 * just need this crap to work halfway to make it possible to test
7486 * AVX instructions. */
7487/** @todo figure out the XINUSE and XMODIFIED */
7488
7489 /*
7490 * Access the x87 memory state.
7491 */
7492 /* The x87+SSE state. */
7493 void *pvMem512;
7494 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7495 if (rcStrict != VINF_SUCCESS)
7496 return rcStrict;
7497 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7498 PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
7499
7500 /* The header. */
7501 PX86XSAVEHDR pHdr;
7502    rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
7503 if (rcStrict != VINF_SUCCESS)
7504 return rcStrict;
7505
7506 /*
7507 * Store the X87 state.
7508 */
7509 if (fReqComponents & XSAVE_C_X87)
7510 {
7511 /* common for all formats */
7512 pDst->FCW = pSrc->FCW;
7513 pDst->FSW = pSrc->FSW;
7514 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7515 pDst->FOP = pSrc->FOP;
7516 pDst->FPUIP = pSrc->FPUIP;
7517 pDst->CS = pSrc->CS;
7518 pDst->FPUDP = pSrc->FPUDP;
7519 pDst->DS = pSrc->DS;
7520 if (enmEffOpSize == IEMMODE_64BIT)
7521 {
7522 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7523 pDst->Rsrvd1 = pSrc->Rsrvd1;
7524 pDst->Rsrvd2 = pSrc->Rsrvd2;
7525 pDst->au32RsrvdForSoftware[0] = 0;
7526 }
7527 else
7528 {
7529 pDst->Rsrvd1 = 0;
7530 pDst->Rsrvd2 = 0;
7531 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7532 }
7533 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7534 {
7535 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7536 * them for now... */
7537 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7538 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7539 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7540 pDst->aRegs[i].au32[3] = 0;
7541 }
7542
7543 }
7544
7545 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7546 {
7547 pDst->MXCSR = pSrc->MXCSR;
7548 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7549 }
7550
7551 if (fReqComponents & XSAVE_C_SSE)
7552 {
7553 /* XMM registers. */
7554 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7555 for (uint32_t i = 0; i < cXmmRegs; i++)
7556 pDst->aXMM[i] = pSrc->aXMM[i];
7557 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7558 * right? */
7559 }
7560
7561 /* Commit the x87 state bits. (probably wrong) */
7562 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7563 if (rcStrict != VINF_SUCCESS)
7564 return rcStrict;
7565
7566 /*
7567 * Store AVX state.
7568 */
7569 if (fReqComponents & XSAVE_C_YMM)
7570 {
7571 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7572 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7573 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
7574 PX86XSAVEYMMHI pCompDst;
7575 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
7576 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7577 if (rcStrict != VINF_SUCCESS)
7578 return rcStrict;
7579
7580 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7581 for (uint32_t i = 0; i < cXmmRegs; i++)
7582 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
7583
7584 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7585 if (rcStrict != VINF_SUCCESS)
7586 return rcStrict;
7587 }
7588
7589 /*
7590 * Update the header.
7591 */
7592 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
7593 | (fReqComponents & fXInUse);
7594
7595 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
7596 if (rcStrict != VINF_SUCCESS)
7597 return rcStrict;
7598
7599 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7600 return VINF_SUCCESS;
7601}
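
/*
 * Editorial sketch (not part of the original file): the per-component offset
 * used above (pCtx->aoffXState[XSAVE_C_YMM_BIT]) corresponds to what real
 * hardware reports in CPUID leaf 0xd: sub-leaf n (n >= 2) returns the
 * component size in EAX and its offset within the non-compacted XSAVE area in
 * EBX.  GCC/Clang sketch, assumes <cpuid.h>; YMM_Hi128 is component 2 and its
 * offset is commonly 576 (512-byte legacy area + 64-byte header).
 */
#if 0 /* illustration only */
# include <cpuid.h>
static unsigned xsaveComponentOffset(unsigned iComponent)
{
    unsigned uEax, uEbx, uEcx, uEdx;
    __cpuid_count(0xd, iComponent, uEax, uEbx, uEcx, uEdx);
    return uEbx;
}
#endif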
7602
7603
7604/**
7605 * Implements 'XRSTOR'.
7606 *
7607 * @param iEffSeg The effective segment.
7608 * @param GCPtrEff The address of the image.
7609 * @param enmEffOpSize The operand size (only REX.W really matters).
7610 */
7611IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7612{
7613 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7614
7615 /*
7616 * Raise exceptions.
7617 */
7618 if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
7619 return iemRaiseUndefinedOpcode(pVCpu);
7620 if (pCtx->cr0 & X86_CR0_TS)
7621 return iemRaiseDeviceNotAvailable(pVCpu);
7622 if (GCPtrEff & 63)
7623 {
7624 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7625 * all/any misalignment sizes, Intel says it's an implementation detail. */
7626 if ( (pCtx->cr0 & X86_CR0_AM)
7627 && pCtx->eflags.Bits.u1AC
7628 && pVCpu->iem.s.uCpl == 3)
7629 return iemRaiseAlignmentCheckException(pVCpu);
7630 return iemRaiseGeneralProtectionFault0(pVCpu);
7631 }
7632
7633/** @todo figure out the exact protocol for the memory access. Currently we
7634 * just need this crap to work halfway to make it possible to test
7635 * AVX instructions. */
7636/** @todo figure out the XINUSE and XMODIFIED */
7637
7638 /*
7639 * Access the x87 memory state.
7640 */
7641 /* The x87+SSE state. */
7642 void *pvMem512;
7643 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
7644 if (rcStrict != VINF_SUCCESS)
7645 return rcStrict;
7646 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
7647 PX86FXSTATE pDst = &pCtx->CTX_SUFF(pXState)->x87;
7648
7649 /*
7650 * Calc the requested mask
7651 */
7652 PX86XSAVEHDR pHdrDst = &pCtx->CTX_SUFF(pXState)->Hdr;
7653 PCX86XSAVEHDR pHdrSrc;
7654    rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
7655 if (rcStrict != VINF_SUCCESS)
7656 return rcStrict;
7657
7658 uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
7659 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7660 //uint64_t const fXInUse = pCtx->aXcr[0];
7661 uint64_t const fRstorMask = pHdrSrc->bmXState;
7662 uint64_t const fCompMask = pHdrSrc->bmXComp;
7663
7664 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
7665
7666 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7667
7668 /* We won't need this any longer. */
7669 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
7670 if (rcStrict != VINF_SUCCESS)
7671 return rcStrict;
7672
7673 /*
7674 * Store the X87 state.
7675 * Load the X87 state.
7676 if (fReqComponents & XSAVE_C_X87)
7677 {
7678 if (fRstorMask & XSAVE_C_X87)
7679 {
7680 pDst->FCW = pSrc->FCW;
7681 pDst->FSW = pSrc->FSW;
7682 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7683 pDst->FOP = pSrc->FOP;
7684 pDst->FPUIP = pSrc->FPUIP;
7685 pDst->CS = pSrc->CS;
7686 pDst->FPUDP = pSrc->FPUDP;
7687 pDst->DS = pSrc->DS;
7688 if (enmEffOpSize == IEMMODE_64BIT)
7689 {
7690 /* Restore upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7691 pDst->Rsrvd1 = pSrc->Rsrvd1;
7692 pDst->Rsrvd2 = pSrc->Rsrvd2;
7693 }
7694 else
7695 {
7696 pDst->Rsrvd1 = 0;
7697 pDst->Rsrvd2 = 0;
7698 }
7699 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7700 {
7701 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7702 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7703 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7704 pDst->aRegs[i].au32[3] = 0;
7705 }
7706 }
7707 else
7708 {
7709 pDst->FCW = 0x37f;
7710 pDst->FSW = 0;
7711 pDst->FTW = 0x00; /* 0 - empty. */
7712 pDst->FPUDP = 0;
7713 pDst->DS = 0; //??
7714 pDst->Rsrvd2= 0;
7715 pDst->FPUIP = 0;
7716 pDst->CS = 0; //??
7717 pDst->Rsrvd1= 0;
7718 pDst->FOP = 0;
7719 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
7720 {
7721 pDst->aRegs[i].au32[0] = 0;
7722 pDst->aRegs[i].au32[1] = 0;
7723 pDst->aRegs[i].au32[2] = 0;
7724 pDst->aRegs[i].au32[3] = 0;
7725 }
7726 }
7727 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
7728 }
7729
7730 /* MXCSR */
7731 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
7732 {
7733 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
7734 pDst->MXCSR = pSrc->MXCSR;
7735 else
7736 pDst->MXCSR = 0x1f80;
7737 }
7738
7739 /* XMM registers. */
7740 if (fReqComponents & XSAVE_C_SSE)
7741 {
7742 if (fRstorMask & XSAVE_C_SSE)
7743 {
7744 for (uint32_t i = 0; i < cXmmRegs; i++)
7745 pDst->aXMM[i] = pSrc->aXMM[i];
7746 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
7747 * right? */
7748 }
7749 else
7750 {
7751 for (uint32_t i = 0; i < cXmmRegs; i++)
7752 {
7753 pDst->aXMM[i].au64[0] = 0;
7754 pDst->aXMM[i].au64[1] = 0;
7755 }
7756 }
7757 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
7758 }
7759
7760 /* Unmap the x87 state bits (so we don't run out of mappings). */
7761 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
7762 if (rcStrict != VINF_SUCCESS)
7763 return rcStrict;
7764
7765 /*
7766 * Restore AVX state.
7767 */
7768 if (fReqComponents & XSAVE_C_YMM)
7769 {
7770 AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
7771 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
7772
7773 if (fRstorMask & XSAVE_C_YMM)
7774 {
7775 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
7776 PCX86XSAVEYMMHI pCompSrc;
7777 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
7778 iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
7779 if (rcStrict != VINF_SUCCESS)
7780 return rcStrict;
7781
7782 for (uint32_t i = 0; i < cXmmRegs; i++)
7783 {
7784 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
7785 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
7786 }
7787
7788 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
7789 if (rcStrict != VINF_SUCCESS)
7790 return rcStrict;
7791 }
7792 else
7793 {
7794 for (uint32_t i = 0; i < cXmmRegs; i++)
7795 {
7796 pCompDst->aYmmHi[i].au64[0] = 0;
7797 pCompDst->aYmmHi[i].au64[1] = 0;
7798 }
7799 }
7800 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
7801 }
7802
7803 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7804 return VINF_SUCCESS;
7805}
7806
7807
7808
7809
7810/**
7811 * Implements 'STMXCSR'.
7812 *
7813 * @param GCPtrEff The address of the image.
7814 */
7815IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7816{
7817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7818
7819 /*
7820 * Raise exceptions.
7821 */
7822 if ( !(pCtx->cr0 & X86_CR0_EM)
7823 && (pCtx->cr4 & X86_CR4_OSFXSR))
7824 {
7825 if (!(pCtx->cr0 & X86_CR0_TS))
7826 {
7827 /*
7828 * Do the job.
7829 */
7830 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7831 if (rcStrict == VINF_SUCCESS)
7832 {
7833 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7834 return VINF_SUCCESS;
7835 }
7836 return rcStrict;
7837 }
7838 return iemRaiseDeviceNotAvailable(pVCpu);
7839 }
7840 return iemRaiseUndefinedOpcode(pVCpu);
7841}
7842
7843
7844/**
7845 * Implements 'VSTMXCSR'.
7846 *
7847 * @param GCPtrEff The address of the image.
7848 */
7849IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7850{
7851 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7852
7853 /*
7854 * Raise exceptions.
7855 */
7856 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
7857 ? (pCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
7858 : !(pCtx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
7859 && (pCtx->cr4 & X86_CR4_OSXSAVE))
7860 {
7861 if (!(pCtx->cr0 & X86_CR0_TS))
7862 {
7863 /*
7864 * Do the job.
7865 */
7866 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
7867 if (rcStrict == VINF_SUCCESS)
7868 {
7869 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7870 return VINF_SUCCESS;
7871 }
7872 return rcStrict;
7873 }
7874 return iemRaiseDeviceNotAvailable(pVCpu);
7875 }
7876 return iemRaiseUndefinedOpcode(pVCpu);
7877}
7878
7879
7880/**
7881 * Implements 'LDMXCSR'.
7882 *
7883 * @param GCPtrEff The address of the image.
7884 */
7885IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7886{
7887 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7888
7889 /*
7890 * Raise exceptions.
7891 */
7892 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
7893 * happen after or before \#UD and \#EM? */
7894 if ( !(pCtx->cr0 & X86_CR0_EM)
7895 && (pCtx->cr4 & X86_CR4_OSFXSR))
7896 {
7897 if (!(pCtx->cr0 & X86_CR0_TS))
7898 {
7899 /*
7900 * Do the job.
7901 */
7902 uint32_t fNewMxCsr;
7903 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
7904 if (rcStrict == VINF_SUCCESS)
7905 {
7906 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7907 if (!(fNewMxCsr & ~fMxCsrMask))
7908 {
7909 pCtx->CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr;
7910 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7911 return VINF_SUCCESS;
7912 }
7913 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
7914 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
7915 return iemRaiseGeneralProtectionFault0(pVCpu);
7916 }
7917 return rcStrict;
7918 }
7919 return iemRaiseDeviceNotAvailable(pVCpu);
7920 }
7921 return iemRaiseUndefinedOpcode(pVCpu);
7922}
7923
7924
7925/**
7926 * Common routine for fnstenv and fnsave.
7927 *
7928 * @param uPtr Where to store the state.
7929 * @param pCtx The CPU context.
7930 */
7931static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
7932{
7933 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
7934 if (enmEffOpSize == IEMMODE_16BIT)
7935 {
7936 uPtr.pu16[0] = pSrcX87->FCW;
7937 uPtr.pu16[1] = pSrcX87->FSW;
7938 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
7939 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7940 {
7941 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
7942 * protected mode or long mode and we save it in real mode? And vice
7943 * versa? And with 32-bit operand size? I think the CPU stores the
7944 * effective address ((CS << 4) + IP) in the offset register rather than
7945 * doing any address calculations here. */
7946 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
7947 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
7948 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
7949 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
7950 }
7951 else
7952 {
7953 uPtr.pu16[3] = pSrcX87->FPUIP;
7954 uPtr.pu16[4] = pSrcX87->CS;
7955 uPtr.pu16[5] = pSrcX87->FPUDP;
7956 uPtr.pu16[6] = pSrcX87->DS;
7957 }
7958 }
7959 else
7960 {
7961 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
7962 uPtr.pu16[0*2] = pSrcX87->FCW;
7963 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
7964 uPtr.pu16[1*2] = pSrcX87->FSW;
7965 uPtr.pu16[1*2+1] = 0xffff;
7966 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
7967 uPtr.pu16[2*2+1] = 0xffff;
7968 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7969 {
7970 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
7971 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
7972 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
7973 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
7974 }
7975 else
7976 {
7977 uPtr.pu32[3] = pSrcX87->FPUIP;
7978 uPtr.pu16[4*2] = pSrcX87->CS;
7979 uPtr.pu16[4*2+1] = pSrcX87->FOP;
7980 uPtr.pu32[5] = pSrcX87->FPUDP;
7981 uPtr.pu16[6*2] = pSrcX87->DS;
7982 uPtr.pu16[6*2+1] = 0xffff;
7983 }
7984 }
7985}
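
/*
 * Editorial sketch (not part of the original file): the protected-mode layouts
 * produced above, as plain structs.  In real/V86 mode words 4 and 6 instead
 * carry FOP plus bits 19:16 of the linear FPUIP/FPUDP.  MYFPUENV16/32 are
 * illustrative names; in the 32-bit image only the low word of the FCW, FSW,
 * FTW and DS dwords is meaningful (the upper halves are written as 0xffff).
 */
#if 0 /* illustration only */
typedef struct { uint16_t FCW, FSW, FTW, FPUIP, CS, FPUDP, DS; } MYFPUENV16;        /* 14 bytes */
typedef struct { uint32_t FCW, FSW, FTW, FPUIP, CSandFOP, FPUDP, DS; } MYFPUENV32;  /* 28 bytes */
#endif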
7986
7987
7988/**
7989 * Common routine for fldenv and frstor.
7990 *
7991 * @param uPtr Where to load the state from.
7992 * @param pCtx The CPU context.
7993 */
7994static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
7995{
7996 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
7997 if (enmEffOpSize == IEMMODE_16BIT)
7998 {
7999 pDstX87->FCW = uPtr.pu16[0];
8000 pDstX87->FSW = uPtr.pu16[1];
8001 pDstX87->FTW = uPtr.pu16[2];
8002 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8003 {
8004 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
8005 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
8006 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
8007 pDstX87->CS = 0;
8008 pDstX87->Rsrvd1= 0;
8009 pDstX87->DS = 0;
8010 pDstX87->Rsrvd2= 0;
8011 }
8012 else
8013 {
8014 pDstX87->FPUIP = uPtr.pu16[3];
8015 pDstX87->CS = uPtr.pu16[4];
8016 pDstX87->Rsrvd1= 0;
8017 pDstX87->FPUDP = uPtr.pu16[5];
8018 pDstX87->DS = uPtr.pu16[6];
8019 pDstX87->Rsrvd2= 0;
8020 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
8021 }
8022 }
8023 else
8024 {
8025 pDstX87->FCW = uPtr.pu16[0*2];
8026 pDstX87->FSW = uPtr.pu16[1*2];
8027 pDstX87->FTW = uPtr.pu16[2*2];
8028 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8029 {
8030 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
8031 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
8032 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
8033 pDstX87->CS = 0;
8034 pDstX87->Rsrvd1= 0;
8035 pDstX87->DS = 0;
8036 pDstX87->Rsrvd2= 0;
8037 }
8038 else
8039 {
8040 pDstX87->FPUIP = uPtr.pu32[3];
8041 pDstX87->CS = uPtr.pu16[4*2];
8042 pDstX87->Rsrvd1= 0;
8043 pDstX87->FOP = uPtr.pu16[4*2+1];
8044 pDstX87->FPUDP = uPtr.pu32[5];
8045 pDstX87->DS = uPtr.pu16[6*2];
8046 pDstX87->Rsrvd2= 0;
8047 }
8048 }
8049
8050 /* Make adjustments. */
8051 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
8052 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
8053 iemFpuRecalcExceptionStatus(pDstX87);
8054 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
8055 * exceptions are pending after loading the saved state? */
8056}
8057
8058
8059/**
8060 * Implements 'FNSTENV'.
8061 *
8062 * @param enmEffOpSize The operand size (only REX.W really matters).
8063 * @param iEffSeg The effective segment register for @a GCPtrEff.
8064 * @param GCPtrEffDst The address of the image.
8065 */
8066IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8067{
8068 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8069 RTPTRUNION uPtr;
8070 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8071 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8072 if (rcStrict != VINF_SUCCESS)
8073 return rcStrict;
8074
8075 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8076
8077 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8078 if (rcStrict != VINF_SUCCESS)
8079 return rcStrict;
8080
8081 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8082 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8083 return VINF_SUCCESS;
8084}
8085
8086
8087/**
8088 * Implements 'FNSAVE'.
8089 *
8090 * @param GCPtrEffDst The address of the image.
8091 * @param enmEffOpSize The operand size.
8092 */
8093IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8094{
8095 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8096 RTPTRUNION uPtr;
8097 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8098 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8099 if (rcStrict != VINF_SUCCESS)
8100 return rcStrict;
8101
8102 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8103 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8104 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8105 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8106 {
8107 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
8108 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
8109 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
8110 }
8111
8112 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8113 if (rcStrict != VINF_SUCCESS)
8114 return rcStrict;
8115
8116 /*
8117 * Re-initialize the FPU context.
8118 */
8119 pFpuCtx->FCW = 0x37f;
8120 pFpuCtx->FSW = 0;
8121 pFpuCtx->FTW = 0x00; /* 0 - empty */
8122 pFpuCtx->FPUDP = 0;
8123 pFpuCtx->DS = 0;
8124 pFpuCtx->Rsrvd2= 0;
8125 pFpuCtx->FPUIP = 0;
8126 pFpuCtx->CS = 0;
8127 pFpuCtx->Rsrvd1= 0;
8128 pFpuCtx->FOP = 0;
8129
8130 iemHlpUsedFpu(pVCpu);
8131 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8132 return VINF_SUCCESS;
8133}
8134
8135
8136
8137/**
8138 * Implements 'FLDENV'.
8139 *
8140 * @param enmEffOpSize The operand size (only REX.W really matters).
8141 * @param iEffSeg The effective segment register for @a GCPtrEff.
8142 * @param GCPtrEffSrc The address of the image.
8143 */
8144IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8145{
8146 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8147 RTCPTRUNION uPtr;
8148 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8149 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8150 if (rcStrict != VINF_SUCCESS)
8151 return rcStrict;
8152
8153 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8154
8155 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8156 if (rcStrict != VINF_SUCCESS)
8157 return rcStrict;
8158
8159 iemHlpUsedFpu(pVCpu);
8160 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8161 return VINF_SUCCESS;
8162}
8163
8164
8165/**
8166 * Implements 'FRSTOR'.
8167 *
8168 * @param GCPtrEffSrc The address of the image.
8169 * @param enmEffOpSize The operand size.
8170 */
8171IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8172{
8173 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8174 RTCPTRUNION uPtr;
8175 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8176 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8177 if (rcStrict != VINF_SUCCESS)
8178 return rcStrict;
8179
8180 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8181 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx);
8182 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8183 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8184 {
8185 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
8186 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
8187 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
8188 pFpuCtx->aRegs[i].au32[3] = 0;
8189 }
8190
8191 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8192 if (rcStrict != VINF_SUCCESS)
8193 return rcStrict;
8194
8195 iemHlpUsedFpu(pVCpu);
8196 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8197 return VINF_SUCCESS;
8198}
8199
8200
8201/**
8202 * Implements 'FLDCW'.
8203 *
8204 * @param u16Fcw The new FCW.
8205 */
8206IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
8207{
8208 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8209
8210 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
8211 /** @todo Testcase: Try to see what happens when trying to set undefined bits
8212 * (other than 6 and 7). Currently ignoring them. */
8213 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
8214 * according to FSW. (This is what is currently implemented.) */
8215 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8216 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
8217 iemFpuRecalcExceptionStatus(pFpuCtx);
8218
8219 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8220 iemHlpUsedFpu(pVCpu);
8221 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8222 return VINF_SUCCESS;
8223}
8224
8225
8226
8227/**
8228 * Implements the underflow case of fxch.
8229 *
8230 * @param iStReg The other stack register.
8231 */
8232IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
8233{
8234 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8235
8236 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8237 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
8238 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8239 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
8240
8241 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
8242 * registers are read as QNaN and then exchanged. This could be
8243 * wrong... */
8244 if (pFpuCtx->FCW & X86_FCW_IM)
8245 {
8246 if (RT_BIT(iReg1) & pFpuCtx->FTW)
8247 {
8248 if (RT_BIT(iReg2) & pFpuCtx->FTW)
8249 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8250 else
8251 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
8252 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
8253 }
8254 else
8255 {
8256 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
8257 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8258 }
8259 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8260 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8261 }
8262 else
8263 {
8264 /* raise underflow exception, don't change anything. */
8265 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
8266 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8267 }
8268
8269 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8270 iemHlpUsedFpu(pVCpu);
8271 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8272 return VINF_SUCCESS;
8273}
8274
8275
8276/**
8277 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
8278 *
8279 * @param iStReg The other stack register.
8280 */
8281IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
8282{
8283 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
8284 Assert(iStReg < 8);
8285
8286 /*
8287 * Raise exceptions.
8288 */
8289 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
8290 return iemRaiseDeviceNotAvailable(pVCpu);
8291
8292 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
8293 uint16_t u16Fsw = pFpuCtx->FSW;
8294 if (u16Fsw & X86_FSW_ES)
8295 return iemRaiseMathFault(pVCpu);
8296
8297 /*
8298 * Check if any of the register accesses causes #SF + #IA.
8299 */
8300 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
8301 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8302 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
8303 {
8304 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
8305 NOREF(u32Eflags);
8306
8307 pFpuCtx->FSW &= ~X86_FSW_C1;
8308 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
8309 if ( !(u16Fsw & X86_FSW_IE)
8310 || (pFpuCtx->FCW & X86_FCW_IM) )
8311 {
8312 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8313            pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8314 }
8315 }
8316 else if (pFpuCtx->FCW & X86_FCW_IM)
8317 {
8318 /* Masked underflow. */
8319 pFpuCtx->FSW &= ~X86_FSW_C1;
8320 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8321 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
8322 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
8323 }
8324 else
8325 {
8326 /* Raise underflow - don't touch EFLAGS or TOP. */
8327 pFpuCtx->FSW &= ~X86_FSW_C1;
8328 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8329 fPop = false;
8330 }
8331
8332 /*
8333 * Pop if necessary.
8334 */
8335 if (fPop)
8336 {
8337 pFpuCtx->FTW &= ~RT_BIT(iReg1);
8338 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
8339 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
8340 }
8341
8342 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
8343 iemHlpUsedFpu(pVCpu);
8344 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8345 return VINF_SUCCESS;
8346}
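
/*
 * Editorial note (not part of the original file): the architectural EFLAGS
 * result of FCOMI/FUCOMI that the comparison worker returns and the code above
 * merges into ZF/PF/CF:
 *   ST(0) >  ST(i)  ->  ZF=0 PF=0 CF=0
 *   ST(0) <  ST(i)  ->  ZF=0 PF=0 CF=1
 *   ST(0) == ST(i)  ->  ZF=1 PF=0 CF=0
 *   unordered       ->  ZF=1 PF=1 CF=1
 * OF, SF and AF are cleared; FUCOMI/FUCOMIP only signal #IA for SNaN operands,
 * FCOMI/FCOMIP for any NaN.
 */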
8347
8348/** @} */
8349