VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 80253

Last change on this file since 80253 was 80089, checked in by vboxsync, 5 years ago

VMM: Kicking out raw-mode - IEM. bugref:9517

1/* $Id: IEMAllCImpl.cpp.h 80089 2019-07-31 20:50:22Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#include "IEMAllCImplSvmInstr.cpp.h"
19#include "IEMAllCImplVmxInstr.cpp.h"
20
21
22/** @name Misc Helpers
23 * @{
24 */
25
26
27/**
28 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
29 *
30 * @returns Strict VBox status code.
31 *
32 * @param pVCpu The cross context virtual CPU structure of the calling thread.
33 * @param u16Port The port number.
34 * @param cbOperand The operand size.
35 */
36static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbOperand)
37{
38 /* The TSS bits we're interested in are the same on 386 and AMD64. */
39 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
40 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
41 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
42 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
43
44 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
45
46 /*
 47 * Check the TSS type. 16-bit TSSes don't have any I/O permission bitmap.
48 */
49 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
50 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
51 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
52 {
53 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
54 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
55 return iemRaiseGeneralProtectionFault0(pVCpu);
56 }
57
58 /*
59 * Read the bitmap offset (may #PF).
60 */
61 uint16_t offBitmap;
62 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
63 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
64 if (rcStrict != VINF_SUCCESS)
65 {
66 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
67 return rcStrict;
68 }
69
70 /*
 71 * The bits checked cover the range from u16Port to (u16Port + cbOperand - 1);
 72 * however, Intel describes the CPU as always reading two bytes regardless of
 73 * whether the bit range crosses a byte boundary. Thus the + 1 in the test below.
74 */
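 /* Worked example (illustrative, not from the original source): for an
  * "out dx, eax" with DX=0x03FD and cbOperand=4, the bits for ports
  * 0x3FD..0x400 must be checked.  offFirstBit = 0x3FD / 8 + offBitmap =
  * 0x7F + offBitmap; the two bytes fetched there are shifted right by
  * 0x3FD & 7 = 5, and the access is allowed only if the low
  * (1 << 4) - 1 = 0xf bits are all clear. */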
75 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
 76 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
 77 * for instance, sizeof(X86TSS32). */
78 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
79 {
80 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
81 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
82 return iemRaiseGeneralProtectionFault0(pVCpu);
83 }
84
85 /*
86 * Read the necessary bits.
87 */
 88 /** @todo Test the assertion in the Intel manual that the CPU reads two
 89 * bytes. The question is how this works w.r.t. #PF and #GP on the
 90 * 2nd byte when it's not required. */
91 uint16_t bmBytes = UINT16_MAX;
92 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
93 if (rcStrict != VINF_SUCCESS)
94 {
95 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
96 return rcStrict;
97 }
98
99 /*
100 * Perform the check.
101 */
102 uint16_t fPortMask = (1 << cbOperand) - 1;
103 bmBytes >>= (u16Port & 7);
104 if (bmBytes & fPortMask)
105 {
106 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
107 u16Port, cbOperand, bmBytes, fPortMask));
108 return iemRaiseGeneralProtectionFault0(pVCpu);
109 }
110
111 return VINF_SUCCESS;
112}
113
114
115/**
116 * Checks if we are allowed to access the given I/O port, raising the
117 * appropriate exceptions if we aren't (or if the I/O bitmap is not
118 * accessible).
119 *
120 * @returns Strict VBox status code.
121 *
122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
123 * @param u16Port The port number.
124 * @param cbOperand The operand size.
125 */
126DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbOperand)
127{
128 X86EFLAGS Efl;
129 Efl.u = IEMMISC_GET_EFL(pVCpu);
130 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
131 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
132 || Efl.Bits.u1VM) )
133 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
134 return VINF_SUCCESS;
135}
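/* Note (illustrative summary, not from the original source): the test above
 * means port I/O is always allowed in real mode, is allowed without touching
 * the TSS when CPL <= IOPL in protected mode, and always goes through the TSS
 * I/O permission bitmap in virtual-8086 mode. */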
136
137
138#if 0
139/**
140 * Calculates the parity bit.
141 *
142 * @returns true if the bit is set, false if not.
143 * @param u8Result The least significant byte of the result.
144 */
145static bool iemHlpCalcParityFlag(uint8_t u8Result)
146{
147 /*
 148 * Parity is set if the number of set bits in the least significant byte of
 149 * the result is even.
150 */
151 uint8_t cBits;
152 cBits = u8Result & 1; /* 0 */
153 u8Result >>= 1;
154 cBits += u8Result & 1;
155 u8Result >>= 1;
156 cBits += u8Result & 1;
157 u8Result >>= 1;
158 cBits += u8Result & 1;
159 u8Result >>= 1;
160 cBits += u8Result & 1; /* 4 */
161 u8Result >>= 1;
162 cBits += u8Result & 1;
163 u8Result >>= 1;
164 cBits += u8Result & 1;
165 u8Result >>= 1;
166 cBits += u8Result & 1;
167 return !(cBits & 1);
168}
169#endif /* not used */
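#if 0
/* Illustrative alternative (not part of the original file): the same parity
 * result can be computed with XOR folding instead of counting bits one at a
 * time. Kept disabled, like the helper above, purely as a sketch. */
static bool iemHlpCalcParityFlagAlt(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;  /* fold the high nibble into the low one */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;  /* bit 0 now holds the XOR of all eight bits */
    return !(u8Result & 1);     /* parity flag is set for an even bit count */
}
#endif /* illustrative only */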
170
171
172/**
 173 * Updates the specified flags according to an 8-bit result.
174 *
175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
176 * @param u8Result The result to set the flags according to.
177 * @param fToUpdate The flags to update.
178 * @param fUndefined The flags that are specified as undefined.
179 */
180static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
181{
182 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
183 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
184 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
185 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
186}
187
188
189/**
190 * Updates the specified flags according to a 16-bit result.
191 *
192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
193 * @param u16Result The result to set the flags according to.
194 * @param fToUpdate The flags to update.
195 * @param fUndefined The flags that are specified as undefined.
196 */
197static void iemHlpUpdateArithEFlagsU16(PVMCPU pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
198{
199 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
200 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
201 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
202 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
203}
204
205
206/**
207 * Helper used by iret.
208 *
209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
210 * @param uCpl The new CPL.
211 * @param pSReg Pointer to the segment register.
212 */
213static void iemHlpAdjustSelectorForNewCpl(PVMCPU pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
214{
215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
216 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
217
218 if ( uCpl > pSReg->Attr.n.u2Dpl
219 && pSReg->Attr.n.u1DescType /* code or data, not system */
220 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
221 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
222 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
223}
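/* Note (illustrative): on a return to an outer privilege level, IRET loads a
 * NULL selector into data segment registers whose DPL is below the new CPL,
 * unless the segment is conforming code; that is what the condition above
 * implements. */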
224
225
226/**
227 * Indicates that we have modified the FPU state.
228 *
229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
230 */
231DECLINLINE(void) iemHlpUsedFpu(PVMCPU pVCpu)
232{
233 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
234}
235
236/** @} */
237
238/** @name C Implementations
239 * @{
240 */
241
242/**
243 * Implements a 16-bit popa.
244 */
245IEM_CIMPL_DEF_0(iemCImpl_popa_16)
246{
247 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
248 RTGCPTR GCPtrLast = GCPtrStart + 15;
249 VBOXSTRICTRC rcStrict;
250
251 /*
252 * The docs are a bit hard to comprehend here, but it looks like we wrap
 253 * around in real mode as long as none of the individual pops crosses the
254 * end of the stack segment. In protected mode we check the whole access
255 * in one go. For efficiency, only do the word-by-word thing if we're in
256 * danger of wrapping around.
257 */
258 /** @todo do popa boundary / wrap-around checks. */
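 /* Note (illustrative, not from the original source): the mapped fast path
  * below relies on the PUSHA/POPA image being DI, SI, BP, SP, BX, DX, CX, AX
  * at increasing addresses, so with the X86_GREG_xAX..X86_GREG_xDI constants
  * assumed to be 0..7 the slot for a register is pa16Mem[7 - X86_GREG_xREG].
  * The 32-bit popa and the pusha variants further down use the same layout. */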
259 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
260 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
261 {
262 /* word-by-word */
263 RTUINT64U TmpRsp;
264 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
265 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
266 if (rcStrict == VINF_SUCCESS)
267 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 {
272 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
273 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
274 }
275 if (rcStrict == VINF_SUCCESS)
276 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
277 if (rcStrict == VINF_SUCCESS)
278 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
279 if (rcStrict == VINF_SUCCESS)
280 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
281 if (rcStrict == VINF_SUCCESS)
282 {
283 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
284 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
285 }
286 }
287 else
288 {
289 uint16_t const *pa16Mem = NULL;
290 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
291 if (rcStrict == VINF_SUCCESS)
292 {
293 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
294 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
295 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
296 /* skip sp */
297 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
298 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
299 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
300 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
301 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 iemRegAddToRsp(pVCpu, 16);
305 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
306 }
307 }
308 }
309 return rcStrict;
310}
311
312
313/**
314 * Implements a 32-bit popa.
315 */
316IEM_CIMPL_DEF_0(iemCImpl_popa_32)
317{
318 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
319 RTGCPTR GCPtrLast = GCPtrStart + 31;
320 VBOXSTRICTRC rcStrict;
321
322 /*
323 * The docs are a bit hard to comprehend here, but it looks like we wrap
 324 * around in real mode as long as none of the individual pops crosses the
325 * end of the stack segment. In protected mode we check the whole access
326 * in one go. For efficiency, only do the word-by-word thing if we're in
327 * danger of wrapping around.
328 */
329 /** @todo do popa boundary / wrap-around checks. */
330 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
331 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
332 {
333 /* word-by-word */
334 RTUINT64U TmpRsp;
335 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
336 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
337 if (rcStrict == VINF_SUCCESS)
338 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
339 if (rcStrict == VINF_SUCCESS)
340 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
341 if (rcStrict == VINF_SUCCESS)
342 {
343 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
344 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
345 }
346 if (rcStrict == VINF_SUCCESS)
347 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 {
354#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
355 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
356 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
357 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
358 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
359 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
360 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
361 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
362#endif
363 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
364 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
365 }
366 }
367 else
368 {
369 uint32_t const *pa32Mem;
370 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
371 if (rcStrict == VINF_SUCCESS)
372 {
373 pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
374 pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
375 pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
376 /* skip esp */
377 pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
378 pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
379 pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
380 pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
381 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
382 if (rcStrict == VINF_SUCCESS)
383 {
384 iemRegAddToRsp(pVCpu, 32);
385 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
386 }
387 }
388 }
389 return rcStrict;
390}
391
392
393/**
394 * Implements a 16-bit pusha.
395 */
396IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
397{
398 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
399 RTGCPTR GCPtrBottom = GCPtrTop - 15;
400 VBOXSTRICTRC rcStrict;
401
402 /*
403 * The docs are a bit hard to comprehend here, but it looks like we wrap
 404 * around in real mode as long as none of the individual pushes crosses the
405 * end of the stack segment. In protected mode we check the whole access
406 * in one go. For efficiency, only do the word-by-word thing if we're in
407 * danger of wrapping around.
408 */
409 /** @todo do pusha boundary / wrap-around checks. */
410 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
411 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
412 {
413 /* word-by-word */
414 RTUINT64U TmpRsp;
415 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
416 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
417 if (rcStrict == VINF_SUCCESS)
418 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
419 if (rcStrict == VINF_SUCCESS)
420 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
421 if (rcStrict == VINF_SUCCESS)
422 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
423 if (rcStrict == VINF_SUCCESS)
424 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
425 if (rcStrict == VINF_SUCCESS)
426 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
427 if (rcStrict == VINF_SUCCESS)
428 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
429 if (rcStrict == VINF_SUCCESS)
430 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
431 if (rcStrict == VINF_SUCCESS)
432 {
433 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
434 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
435 }
436 }
437 else
438 {
439 GCPtrBottom--;
440 uint16_t *pa16Mem = NULL;
441 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
442 if (rcStrict == VINF_SUCCESS)
443 {
444 pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
445 pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
446 pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
447 pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
448 pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
449 pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
450 pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
451 pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
452 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
453 if (rcStrict == VINF_SUCCESS)
454 {
455 iemRegSubFromRsp(pVCpu, 16);
456 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
457 }
458 }
459 }
460 return rcStrict;
461}
462
463
464/**
465 * Implements a 32-bit pusha.
466 */
467IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
468{
469 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
470 RTGCPTR GCPtrBottom = GCPtrTop - 31;
471 VBOXSTRICTRC rcStrict;
472
473 /*
474 * The docs are a bit hard to comprehend here, but it looks like we wrap
 475 * around in real mode as long as none of the individual pushes crosses the
476 * end of the stack segment. In protected mode we check the whole access
477 * in one go. For efficiency, only do the word-by-word thing if we're in
478 * danger of wrapping around.
479 */
480 /** @todo do pusha boundary / wrap-around checks. */
481 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
482 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
483 {
484 /* word-by-word */
485 RTUINT64U TmpRsp;
486 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
487 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
488 if (rcStrict == VINF_SUCCESS)
489 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
496 if (rcStrict == VINF_SUCCESS)
497 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
498 if (rcStrict == VINF_SUCCESS)
499 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
500 if (rcStrict == VINF_SUCCESS)
501 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
502 if (rcStrict == VINF_SUCCESS)
503 {
504 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
505 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
506 }
507 }
508 else
509 {
510 GCPtrBottom--;
511 uint32_t *pa32Mem;
512 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
513 if (rcStrict == VINF_SUCCESS)
514 {
515 pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
516 pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
517 pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
518 pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
519 pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
520 pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
521 pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
522 pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
523 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
524 if (rcStrict == VINF_SUCCESS)
525 {
526 iemRegSubFromRsp(pVCpu, 32);
527 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
528 }
529 }
530 }
531 return rcStrict;
532}
533
534
535/**
536 * Implements pushf.
537 *
538 *
539 * @param enmEffOpSize The effective operand size.
540 */
541IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
542{
543 VBOXSTRICTRC rcStrict;
544
545 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
546 {
547 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
548 IEM_SVM_UPDATE_NRIP(pVCpu);
549 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
550 }
551
552 /*
 553 * If we're in V8086 mode some care is required (which is why we're
 554 * doing this in a C implementation).
555 */
556 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
557 if ( (fEfl & X86_EFL_VM)
558 && X86_EFL_GET_IOPL(fEfl) != 3 )
559 {
560 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
561 if ( enmEffOpSize != IEMMODE_16BIT
562 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
563 return iemRaiseGeneralProtectionFault0(pVCpu);
564 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
565 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
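 /* Note (illustrative): IF is EFLAGS bit 9 and VIF is bit 19, so the shift by
  * (19 - 9) copies the virtual interrupt flag down into the IF position of the
  * 16-bit image that gets pushed under CR4.VME. */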
566 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
567 }
568 else
569 {
570
571 /*
572 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
573 */
574 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
575
576 switch (enmEffOpSize)
577 {
578 case IEMMODE_16BIT:
579 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
580 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
581 fEfl |= UINT16_C(0xf000);
582 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
583 break;
584 case IEMMODE_32BIT:
585 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
586 break;
587 case IEMMODE_64BIT:
588 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
589 break;
590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
591 }
592 }
593 if (rcStrict != VINF_SUCCESS)
594 return rcStrict;
595
596 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
597 return VINF_SUCCESS;
598}
599
600
601/**
602 * Implements popf.
603 *
604 * @param enmEffOpSize The effective operand size.
605 */
606IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
607{
608 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
609 VBOXSTRICTRC rcStrict;
610 uint32_t fEflNew;
611
612 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
613 {
614 Log2(("popf: Guest intercept -> #VMEXIT\n"));
615 IEM_SVM_UPDATE_NRIP(pVCpu);
616 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
617 }
618
619 /*
620 * V8086 is special as usual.
621 */
622 if (fEflOld & X86_EFL_VM)
623 {
624 /*
625 * Almost anything goes if IOPL is 3.
626 */
627 if (X86_EFL_GET_IOPL(fEflOld) == 3)
628 {
629 switch (enmEffOpSize)
630 {
631 case IEMMODE_16BIT:
632 {
633 uint16_t u16Value;
634 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
635 if (rcStrict != VINF_SUCCESS)
636 return rcStrict;
637 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
638 break;
639 }
640 case IEMMODE_32BIT:
641 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644 break;
645 IEM_NOT_REACHED_DEFAULT_CASE_RET();
646 }
647
648 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
649 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
650 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
651 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
652 }
653 /*
654 * Interrupt flag virtualization with CR4.VME=1.
655 */
656 else if ( enmEffOpSize == IEMMODE_16BIT
657 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
658 {
659 uint16_t u16Value;
660 RTUINT64U TmpRsp;
661 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
662 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
663 if (rcStrict != VINF_SUCCESS)
664 return rcStrict;
665
666 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
667 * or before? */
668 if ( ( (u16Value & X86_EFL_IF)
669 && (fEflOld & X86_EFL_VIP))
670 || (u16Value & X86_EFL_TF) )
671 return iemRaiseGeneralProtectionFault0(pVCpu);
672
673 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
674 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
675 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
676 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
677
678 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
679 }
680 else
681 return iemRaiseGeneralProtectionFault0(pVCpu);
682
683 }
684 /*
685 * Not in V8086 mode.
686 */
687 else
688 {
689 /* Pop the flags. */
690 switch (enmEffOpSize)
691 {
692 case IEMMODE_16BIT:
693 {
694 uint16_t u16Value;
695 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
696 if (rcStrict != VINF_SUCCESS)
697 return rcStrict;
698 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
699
700 /*
701 * Ancient CPU adjustments:
702 * - 8086, 80186, V20/30:
 703 * Fixed bits 15:12 are not kept correctly internally, mostly for
704 * practical reasons (masking below). We add them when pushing flags.
705 * - 80286:
706 * The NT and IOPL flags cannot be popped from real mode and are
707 * therefore always zero (since a 286 can never exit from PM and
708 * their initial value is zero). This changed on a 386 and can
 709 * therefore be used to detect a 286 or a 386 CPU in real mode.
710 */
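 /* Illustrative aside (not from the original source): this is the basis of the
  * classic real-mode CPU check, roughly
  *     pushf / pop ax / or ax, 7000h / push ax / popf / pushf / pop ax
  * where bits 14:12 reading back as zero suggests a 286, while bits 15:12
  * stuck at 1 suggests an 8086/80186. */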
711 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
712 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
713 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
714 break;
715 }
716 case IEMMODE_32BIT:
717 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
718 if (rcStrict != VINF_SUCCESS)
719 return rcStrict;
720 break;
721 case IEMMODE_64BIT:
722 {
723 uint64_t u64Value;
724 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
725 if (rcStrict != VINF_SUCCESS)
726 return rcStrict;
727 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
728 break;
729 }
730 IEM_NOT_REACHED_DEFAULT_CASE_RET();
731 }
732
733 /* Merge them with the current flags. */
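 /* Note (illustrative summary): when CPL is 0, or when the popped IOPL/IF match
  * the old values anyway, every POPF-modifiable bit is taken from the popped
  * value; when 0 < CPL <= IOPL everything except IOPL is taken; otherwise both
  * IOPL and IF are preserved from the old flags. */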
734 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
735 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
736 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
737 || pVCpu->iem.s.uCpl == 0)
738 {
739 fEflNew &= fPopfBits;
740 fEflNew |= ~fPopfBits & fEflOld;
741 }
742 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
743 {
744 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
745 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
746 }
747 else
748 {
749 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
750 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
751 }
752 }
753
754 /*
755 * Commit the flags.
756 */
757 Assert(fEflNew & RT_BIT_32(1));
758 IEMMISC_SET_EFL(pVCpu, fEflNew);
759 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
760
761 return VINF_SUCCESS;
762}
763
764
765/**
 766 * Implements a 16-bit indirect call.
 767 *
 768 * @param uNewPC The new program counter (RIP) value (loaded from the
 769 * operand).
771 */
772IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
773{
774 uint16_t uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
775 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
776 return iemRaiseGeneralProtectionFault0(pVCpu);
777
778 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
779 if (rcStrict != VINF_SUCCESS)
780 return rcStrict;
781
782 pVCpu->cpum.GstCtx.rip = uNewPC;
783 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
784
785#ifndef IEM_WITH_CODE_TLB
786 /* Flush the prefetch buffer. */
787 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
788#endif
789 return VINF_SUCCESS;
790}
791
792
793/**
794 * Implements a 16-bit relative call.
795 *
 796 * @param offDisp The displacement offset.
797 */
798IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
799{
800 uint16_t uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
801 uint16_t uNewPC = uOldPC + offDisp;
802 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
803 return iemRaiseGeneralProtectionFault0(pVCpu);
804
805 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
806 if (rcStrict != VINF_SUCCESS)
807 return rcStrict;
808
809 pVCpu->cpum.GstCtx.rip = uNewPC;
810 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
811
812#ifndef IEM_WITH_CODE_TLB
813 /* Flush the prefetch buffer. */
814 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
815#endif
816 return VINF_SUCCESS;
817}
818
819
820/**
821 * Implements a 32-bit indirect call.
822 *
823 * @param uNewPC The new program counter (RIP) value (loaded from the
824 * operand).
826 */
827IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
828{
829 uint32_t uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
830 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
831 return iemRaiseGeneralProtectionFault0(pVCpu);
832
833 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
834 if (rcStrict != VINF_SUCCESS)
835 return rcStrict;
836
837 pVCpu->cpum.GstCtx.rip = uNewPC;
838 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
839
840#ifndef IEM_WITH_CODE_TLB
841 /* Flush the prefetch buffer. */
842 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
843#endif
844 return VINF_SUCCESS;
845}
846
847
848/**
849 * Implements a 32-bit relative call.
850 *
 851 * @param offDisp The displacement offset.
852 */
853IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
854{
855 uint32_t uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
856 uint32_t uNewPC = uOldPC + offDisp;
857 if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
858 return iemRaiseGeneralProtectionFault0(pVCpu);
859
860 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
861 if (rcStrict != VINF_SUCCESS)
862 return rcStrict;
863
864 pVCpu->cpum.GstCtx.rip = uNewPC;
865 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
866
867#ifndef IEM_WITH_CODE_TLB
868 /* Flush the prefetch buffer. */
869 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
870#endif
871 return VINF_SUCCESS;
872}
873
874
875/**
876 * Implements a 64-bit indirect call.
877 *
878 * @param uNewPC The new program counter (RIP) value (loaded from the
879 * operand).
881 */
882IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
883{
884 uint64_t uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
885 if (!IEM_IS_CANONICAL(uNewPC))
886 return iemRaiseGeneralProtectionFault0(pVCpu);
887
888 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
889 if (rcStrict != VINF_SUCCESS)
890 return rcStrict;
891
892 pVCpu->cpum.GstCtx.rip = uNewPC;
893 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
894
895#ifndef IEM_WITH_CODE_TLB
896 /* Flush the prefetch buffer. */
897 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
898#endif
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Implements a 64-bit relative call.
905 *
 906 * @param offDisp The displacement offset.
907 */
908IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
909{
910 uint64_t uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
911 uint64_t uNewPC = uOldPC + offDisp;
912 if (!IEM_IS_CANONICAL(uNewPC))
913 return iemRaiseNotCanonical(pVCpu);
914
915 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
916 if (rcStrict != VINF_SUCCESS)
917 return rcStrict;
918
919 pVCpu->cpum.GstCtx.rip = uNewPC;
920 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
921
922#ifndef IEM_WITH_CODE_TLB
923 /* Flush the prefetch buffer. */
924 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
925#endif
926
927 return VINF_SUCCESS;
928}
929
930
931/**
932 * Implements far jumps and calls thru task segments (TSS).
933 *
934 * @param uSel The selector.
935 * @param enmBranch The kind of branching we're performing.
936 * @param enmEffOpSize The effective operand size.
937 * @param pDesc The descriptor corresponding to @a uSel. The type is
 938 * an available TSS.
939 */
940IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
941{
942#ifndef IEM_IMPLEMENTS_TASKSWITCH
943 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
944#else
945 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
946 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
947 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
948 RT_NOREF_PV(enmEffOpSize);
949 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
950
951 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
952 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
953 {
954 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
955 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
956 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
957 }
958
959 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
960 * far calls (see iemCImpl_callf). Most likely in both cases it should be
961 * checked here, need testcases. */
962 if (!pDesc->Legacy.Gen.u1Present)
963 {
964 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
965 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
966 }
967
968 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
969 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
970 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
971#endif
972}
973
974
975/**
976 * Implements far jumps and calls thru task gates.
977 *
978 * @param uSel The selector.
979 * @param enmBranch The kind of branching we're performing.
980 * @param enmEffOpSize The effective operand size.
981 * @param pDesc The descriptor corresponding to @a uSel. The type is
982 * task gate.
983 */
984IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
985{
986#ifndef IEM_IMPLEMENTS_TASKSWITCH
987 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
988#else
989 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
990 RT_NOREF_PV(enmEffOpSize);
991 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
992
993 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
994 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
995 {
996 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
997 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
998 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
999 }
1000
1001 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1002 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1003 * checked here, need testcases. */
1004 if (!pDesc->Legacy.Gen.u1Present)
1005 {
 1006 Log(("BranchTaskGate task gate not present uSel=%04x -> #NP\n", uSel));
1007 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1008 }
1009
1010 /*
1011 * Fetch the new TSS descriptor from the GDT.
1012 */
1013 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1014 if (uSelTss & X86_SEL_LDT)
1015 {
1016 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1017 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1018 }
1019
1020 IEMSELDESC TssDesc;
1021 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1022 if (rcStrict != VINF_SUCCESS)
1023 return rcStrict;
1024
1025 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1026 {
1027 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1028 TssDesc.Legacy.Gate.u4Type));
1029 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1030 }
1031
1032 if (!TssDesc.Legacy.Gate.u1Present)
1033 {
1034 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1035 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1036 }
1037
1038 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1039 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1040 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1041#endif
1042}
1043
1044
1045/**
1046 * Implements far jumps and calls thru call gates.
1047 *
1048 * @param uSel The selector.
1049 * @param enmBranch The kind of branching we're performing.
1050 * @param enmEffOpSize The effective operand size.
1051 * @param pDesc The descriptor corresponding to @a uSel. The type is
1052 * call gate.
1053 */
1054IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1055{
1056#define IEM_IMPLEMENTS_CALLGATE
1057#ifndef IEM_IMPLEMENTS_CALLGATE
1058 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1059#else
1060 RT_NOREF_PV(enmEffOpSize);
1061 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1062
1063 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1064 * inter-privilege calls and are much more complex.
1065 *
1066 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1067 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1068 * must be 16-bit or 32-bit.
1069 */
1070 /** @todo: effective operand size is probably irrelevant here, only the
1071 * call gate bitness matters??
1072 */
1073 VBOXSTRICTRC rcStrict;
1074 RTPTRUNION uPtrRet;
1075 uint64_t uNewRsp;
1076 uint64_t uNewRip;
1077 uint64_t u64Base;
1078 uint32_t cbLimit;
1079 RTSEL uNewCS;
1080 IEMSELDESC DescCS;
1081
1082 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1083 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1084 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1085 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1086
1087 /* Determine the new instruction pointer from the gate descriptor. */
1088 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1089 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1090 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1091
1092 /* Perform DPL checks on the gate descriptor. */
1093 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1094 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1095 {
1096 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1097 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1098 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1099 }
1100
1101 /** @todo does this catch NULL selectors, too? */
1102 if (!pDesc->Legacy.Gen.u1Present)
1103 {
1104 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1105 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1106 }
1107
1108 /*
1109 * Fetch the target CS descriptor from the GDT or LDT.
1110 */
1111 uNewCS = pDesc->Legacy.Gate.u16Sel;
1112 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1113 if (rcStrict != VINF_SUCCESS)
1114 return rcStrict;
1115
1116 /* Target CS must be a code selector. */
1117 if ( !DescCS.Legacy.Gen.u1DescType
1118 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1119 {
1120 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1121 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1122 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1123 }
1124
1125 /* Privilege checks on target CS. */
1126 if (enmBranch == IEMBRANCH_JUMP)
1127 {
1128 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1129 {
1130 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1131 {
1132 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1133 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1134 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1135 }
1136 }
1137 else
1138 {
1139 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1140 {
1141 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1142 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1143 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1144 }
1145 }
1146 }
1147 else
1148 {
1149 Assert(enmBranch == IEMBRANCH_CALL);
1150 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1151 {
1152 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1153 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1154 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1155 }
1156 }
1157
1158 /* Additional long mode checks. */
1159 if (IEM_IS_LONG_MODE(pVCpu))
1160 {
1161 if (!DescCS.Legacy.Gen.u1Long)
1162 {
1163 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1164 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1165 }
1166
1167 /* L vs D. */
1168 if ( DescCS.Legacy.Gen.u1Long
1169 && DescCS.Legacy.Gen.u1DefBig)
1170 {
1171 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1172 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1173 }
1174 }
1175
1176 if (!DescCS.Legacy.Gate.u1Present)
1177 {
1178 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1179 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1180 }
1181
1182 if (enmBranch == IEMBRANCH_JUMP)
1183 {
1184 /** @todo: This is very similar to regular far jumps; merge! */
1185 /* Jumps are fairly simple... */
1186
1187 /* Chop the high bits off if 16-bit gate (Intel says so). */
1188 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1189 uNewRip = (uint16_t)uNewRip;
1190
1191 /* Limit check for non-long segments. */
1192 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1193 if (DescCS.Legacy.Gen.u1Long)
1194 u64Base = 0;
1195 else
1196 {
1197 if (uNewRip > cbLimit)
1198 {
1199 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1200 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1201 }
1202 u64Base = X86DESC_BASE(&DescCS.Legacy);
1203 }
1204
1205 /* Canonical address check. */
1206 if (!IEM_IS_CANONICAL(uNewRip))
1207 {
1208 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1209 return iemRaiseNotCanonical(pVCpu);
1210 }
1211
1212 /*
1213 * Ok, everything checked out fine. Now set the accessed bit before
1214 * committing the result into CS, CSHID and RIP.
1215 */
1216 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1217 {
1218 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1219 if (rcStrict != VINF_SUCCESS)
1220 return rcStrict;
1221 /** @todo check what VT-x and AMD-V does. */
1222 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1223 }
1224
1225 /* commit */
1226 pVCpu->cpum.GstCtx.rip = uNewRip;
1227 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1228 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1229 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1230 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1231 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1232 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1233 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1234 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1235 }
1236 else
1237 {
1238 Assert(enmBranch == IEMBRANCH_CALL);
1239 /* Calls are much more complicated. */
1240
1241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1242 {
1243 uint16_t offNewStack; /* Offset of new stack in TSS. */
1244 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1245 uint8_t uNewCSDpl;
1246 uint8_t cbWords;
1247 RTSEL uNewSS;
1248 RTSEL uOldSS;
1249 uint64_t uOldRsp;
1250 IEMSELDESC DescSS;
1251 RTPTRUNION uPtrTSS;
1252 RTGCPTR GCPtrTSS;
1253 RTPTRUNION uPtrParmWds;
1254 RTGCPTR GCPtrParmWds;
1255
1256 /* More privilege. This is the fun part. */
1257 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1258
1259 /*
1260 * Determine new SS:rSP from the TSS.
1261 */
1262 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1263
1264 /* Figure out where the new stack pointer is stored in the TSS. */
1265 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1266 if (!IEM_IS_LONG_MODE(pVCpu))
1267 {
1268 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1269 {
1270 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1271 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1272 }
1273 else
1274 {
1275 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1276 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1277 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1278 }
1279 }
1280 else
1281 {
1282 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1283 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1284 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1285 }
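 /* Note (illustrative offsets): with esp0 at offset 4 of a 32-bit TSS the
  * ring-N stack pointer sits at 4 + N * 8, with sp0 at offset 2 of a 16-bit
  * TSS at 2 + N * 4, and with rsp0 at offset 4 of a 64-bit TSS at 4 + N * 8,
  * which is what the three branches above compute. */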
1286
1287 /* Check against TSS limit. */
1288 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1289 {
1290 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1291 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1292 }
1293
1294 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1295 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
1296 if (rcStrict != VINF_SUCCESS)
1297 {
1298 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1299 return rcStrict;
1300 }
1301
1302 if (!IEM_IS_LONG_MODE(pVCpu))
1303 {
1304 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1305 {
1306 uNewRsp = uPtrTSS.pu32[0];
1307 uNewSS = uPtrTSS.pu16[2];
1308 }
1309 else
1310 {
1311 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1312 uNewRsp = uPtrTSS.pu16[0];
1313 uNewSS = uPtrTSS.pu16[1];
1314 }
1315 }
1316 else
1317 {
1318 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1319 /* SS will be a NULL selector, but that's valid. */
1320 uNewRsp = uPtrTSS.pu64[0];
1321 uNewSS = uNewCSDpl;
1322 }
1323
1324 /* Done with the TSS now. */
1325 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1326 if (rcStrict != VINF_SUCCESS)
1327 {
1328 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1329 return rcStrict;
1330 }
1331
1332 /* Only used outside of long mode. */
1333 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1334
1335 /* If EFER.LMA is 0, there's extra work to do. */
1336 if (!IEM_IS_LONG_MODE(pVCpu))
1337 {
1338 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1339 {
1340 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1341 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1342 }
1343
1344 /* Grab the new SS descriptor. */
1345 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1346 if (rcStrict != VINF_SUCCESS)
1347 return rcStrict;
1348
1349 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1350 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1351 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1352 {
1353 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
 1354 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1355 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1356 }
1357
1358 /* Ensure new SS is a writable data segment. */
1359 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1360 {
1361 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1362 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1363 }
1364
1365 if (!DescSS.Legacy.Gen.u1Present)
1366 {
1367 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1368 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1369 }
1370 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1371 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1372 else
1373 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
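 /* Note (illustrative): the "4 + cbWords" covers the return CS:(E)IP pair and
  * the caller's SS:(E)SP pair that are pushed on the new stack around the
  * copied parameter (d)words. */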
1374 }
1375 else
1376 {
1377 /* Just grab the new (NULL) SS descriptor. */
1378 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1379 * like we do... */
1380 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1381 if (rcStrict != VINF_SUCCESS)
1382 return rcStrict;
1383
1384 cbNewStack = sizeof(uint64_t) * 4;
1385 }
1386
1387 /** @todo: According to Intel, new stack is checked for enough space first,
1388 * then switched. According to AMD, the stack is switched first and
1389 * then pushes might fault!
1390 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1391 * incoming stack #PF happens before actual stack switch. AMD is
1392 * either lying or implicitly assumes that new state is committed
1393 * only if and when an instruction doesn't fault.
1394 */
1395
1396 /** @todo: According to AMD, CS is loaded first, then SS.
1397 * According to Intel, it's the other way around!?
1398 */
1399
1400 /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
1401
1402 /* Set the accessed bit before committing new SS. */
1403 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1404 {
1405 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1406 if (rcStrict != VINF_SUCCESS)
1407 return rcStrict;
1408 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1409 }
1410
1411 /* Remember the old SS:rSP and their linear address. */
1412 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1413 uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1414
1415 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1416
1417 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1418 or #PF, the former is not implemented in this workaround. */
 1419 /** @todo Properly fix call gate target stack exceptions. */
1420 /** @todo testcase: Cover callgates with partially or fully inaccessible
1421 * target stacks. */
1422 void *pvNewFrame;
1423 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1424 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW);
1425 if (rcStrict != VINF_SUCCESS)
1426 {
1427 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1428 return rcStrict;
1429 }
1430 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1431 if (rcStrict != VINF_SUCCESS)
1432 {
1433 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1434 return rcStrict;
1435 }
1436
1437 /* Commit new SS:rSP. */
1438 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1439 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1440 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1441 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1442 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1443 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1444 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1445 pVCpu->iem.s.uCpl = uNewCSDpl;
1446 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1447 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1448
1449 /* At this point the stack access must not fail because new state was already committed. */
 1450 /** @todo this can still fail due to SS.LIMIT not being checked. */
1451 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1452 &uPtrRet.pv, &uNewRsp);
1453 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1454 VERR_INTERNAL_ERROR_5);
1455
1456 if (!IEM_IS_LONG_MODE(pVCpu))
1457 {
1458 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1459 {
1460 /* Push the old CS:rIP. */
1461 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1462 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1463
1464 if (cbWords)
1465 {
1466 /* Map the relevant chunk of the old stack. */
1467 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1468 if (rcStrict != VINF_SUCCESS)
1469 {
1470 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1471 return rcStrict;
1472 }
1473
1474 /* Copy the parameter (d)words. */
1475 for (int i = 0; i < cbWords; ++i)
1476 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1477
1478 /* Unmap the old stack. */
1479 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1480 if (rcStrict != VINF_SUCCESS)
1481 {
1482 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1483 return rcStrict;
1484 }
1485 }
1486
1487 /* Push the old SS:rSP. */
1488 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1489 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1490 }
1491 else
1492 {
1493 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1494
1495 /* Push the old CS:rIP. */
1496 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1497 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1498
1499 if (cbWords)
1500 {
1501 /* Map the relevant chunk of the old stack. */
1502 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
1503 if (rcStrict != VINF_SUCCESS)
1504 {
1505 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1506 return rcStrict;
1507 }
1508
1509 /* Copy the parameter words. */
1510 for (int i = 0; i < cbWords; ++i)
1511 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1512
1513 /* Unmap the old stack. */
1514 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1515 if (rcStrict != VINF_SUCCESS)
1516 {
 1517 Log(("BranchCallGate: Old stack unmapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1518 return rcStrict;
1519 }
1520 }
1521
1522 /* Push the old SS:rSP. */
1523 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1524 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1525 }
1526 }
1527 else
1528 {
1529 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1530
1531 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1532 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1533 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1534 uPtrRet.pu64[2] = uOldRsp;
1535 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1536 }
1537
1538 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1539 if (rcStrict != VINF_SUCCESS)
1540 {
1541 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1542 return rcStrict;
1543 }
1544
1545 /* Chop the high bits off if 16-bit gate (Intel says so). */
1546 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1547 uNewRip = (uint16_t)uNewRip;
1548
1549 /* Limit / canonical check. */
1550 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1551 if (!IEM_IS_LONG_MODE(pVCpu))
1552 {
1553 if (uNewRip > cbLimit)
1554 {
1555 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1556 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1557 }
1558 u64Base = X86DESC_BASE(&DescCS.Legacy);
1559 }
1560 else
1561 {
1562 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1563 if (!IEM_IS_CANONICAL(uNewRip))
1564 {
1565 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1566 return iemRaiseNotCanonical(pVCpu);
1567 }
1568 u64Base = 0;
1569 }
1570
1571 /*
1572 * Now set the accessed bit before
1573 * writing the return address to the stack and committing the result into
1574 * CS, CSHID and RIP.
1575 */
1576 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1577 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1578 {
1579 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1580 if (rcStrict != VINF_SUCCESS)
1581 return rcStrict;
1582 /** @todo check what VT-x and AMD-V does. */
1583 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1584 }
1585
1586 /* Commit new CS:rIP. */
1587 pVCpu->cpum.GstCtx.rip = uNewRip;
1588 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1589 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1590 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1591 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1595 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1596 }
1597 else
1598 {
1599 /* Same privilege. */
1600 /** @todo: This is very similar to regular far calls; merge! */
1601
1602 /* Check stack first - may #SS(0). */
1603 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1604 * 16-bit code cause a two or four byte CS to be pushed? */
1605 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1606 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1607 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1608 &uPtrRet.pv, &uNewRsp);
1609 if (rcStrict != VINF_SUCCESS)
1610 return rcStrict;
1611
1612 /* Chop the high bits off if 16-bit gate (Intel says so). */
1613 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1614 uNewRip = (uint16_t)uNewRip;
1615
1616 /* Limit / canonical check. */
1617 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1618 if (!IEM_IS_LONG_MODE(pVCpu))
1619 {
1620 if (uNewRip > cbLimit)
1621 {
1622 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1623 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1624 }
1625 u64Base = X86DESC_BASE(&DescCS.Legacy);
1626 }
1627 else
1628 {
1629 if (!IEM_IS_CANONICAL(uNewRip))
1630 {
1631 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1632 return iemRaiseNotCanonical(pVCpu);
1633 }
1634 u64Base = 0;
1635 }
1636
1637 /*
1638 * Now set the accessed bit before
1639 * writing the return address to the stack and committing the result into
1640 * CS, CSHID and RIP.
1641 */
1642 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1643 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1644 {
1645 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1646 if (rcStrict != VINF_SUCCESS)
1647 return rcStrict;
1648 /** @todo check what VT-x and AMD-V does. */
1649 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1650 }
1651
1652 /* stack */
1653 if (!IEM_IS_LONG_MODE(pVCpu))
1654 {
1655 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1656 {
1657 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1658 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1659 }
1660 else
1661 {
1662 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1663 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1664 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1665 }
1666 }
1667 else
1668 {
1669 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1670 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1671 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1672 }
1673
1674 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /* commit */
1679 pVCpu->cpum.GstCtx.rip = uNewRip;
1680 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1681 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1682 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1683 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1684 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1685 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1686 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1687 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1688 }
1689 }
1690 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1691
1692 /* Flush the prefetch buffer. */
1693# ifdef IEM_WITH_CODE_TLB
1694 pVCpu->iem.s.pbInstrBuf = NULL;
1695# else
1696 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1697# endif
1698 return VINF_SUCCESS;
1699#endif
1700}
1701
1702
1703/**
1704 * Implements far jumps and calls thru system selectors.
1705 *
1706 * @param uSel The selector.
1707 * @param enmBranch The kind of branching we're performing.
1708 * @param enmEffOpSize The effective operand size.
1709 * @param pDesc The descriptor corresponding to @a uSel.
1710 */
1711IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
1712{
1713 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1714 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1715 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1716
1717 if (IEM_IS_LONG_MODE(pVCpu))
1718 switch (pDesc->Legacy.Gen.u4Type)
1719 {
1720 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1721 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1722
1723 default:
1724 case AMD64_SEL_TYPE_SYS_LDT:
1725 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1726 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1727 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1728 case AMD64_SEL_TYPE_SYS_INT_GATE:
1729 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1730 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1731 }
1732
1733 switch (pDesc->Legacy.Gen.u4Type)
1734 {
1735 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1736 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1737 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
1738
1739 case X86_SEL_TYPE_SYS_TASK_GATE:
1740 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
1741
1742 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1743 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1744 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
1745
1746 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1747 Log(("branch %04x -> busy 286 TSS\n", uSel));
1748 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1749
1750 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1751 Log(("branch %04x -> busy 386 TSS\n", uSel));
1752 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1753
1754 default:
1755 case X86_SEL_TYPE_SYS_LDT:
1756 case X86_SEL_TYPE_SYS_286_INT_GATE:
1757 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1758 case X86_SEL_TYPE_SYS_386_INT_GATE:
1759 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1760 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1761 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1762 }
1763}
1764
1765
1766/**
1767 * Implements far jumps.
1768 *
1769 * @param uSel The selector.
1770 * @param offSeg The segment offset.
1771 * @param enmEffOpSize The effective operand size.
1772 */
1773IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1774{
1775 NOREF(cbInstr);
1776 Assert(offSeg <= UINT32_MAX);
1777
1778 /*
1779 * Real mode and V8086 mode are easy. The only snag seems to be that
1780 * CS.limit doesn't change and the limit check is done against the current
1781 * limit.
1782 */
1783 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1784 * 1998) that up to and including the Intel 486, far control
1785 * transfers in real mode set default CS attributes (0x93) and also
1786 * set a 64K segment limit. Starting with the Pentium, the
1787 * attributes and limit are left alone but the access rights are
1788 * ignored. We only implement the Pentium+ behavior.
1789 * */
1790 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1791 {
1792 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1793 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1794 {
1795 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1796 return iemRaiseGeneralProtectionFault0(pVCpu);
1797 }
1798
1799 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1800 pVCpu->cpum.GstCtx.rip = offSeg;
1801 else
1802 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1803 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1804 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1805 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1806 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1807 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1808 return VINF_SUCCESS;
1809 }
1810
1811 /*
1812 * Protected mode. Need to parse the specified descriptor...
1813 */
1814 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1815 {
1816 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1817 return iemRaiseGeneralProtectionFault0(pVCpu);
1818 }
1819
1820 /* Fetch the descriptor. */
1821 IEMSELDESC Desc;
1822 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1823 if (rcStrict != VINF_SUCCESS)
1824 return rcStrict;
1825
1826 /* Is it there? */
1827 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1828 {
1829 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1830 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1831 }
1832
1833 /*
1834 * Deal with it according to its type. We do the standard code selectors
1835 * here and dispatch the system selectors to worker functions.
1836 */
1837 if (!Desc.Legacy.Gen.u1DescType)
1838 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1839
1840 /* Only code segments. */
1841 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1842 {
1843 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1844 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1845 }
1846
1847 /* L vs D. */
1848 if ( Desc.Legacy.Gen.u1Long
1849 && Desc.Legacy.Gen.u1DefBig
1850 && IEM_IS_LONG_MODE(pVCpu))
1851 {
1852 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1853 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1854 }
1855
1856 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1857 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1858 {
1859 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1860 {
1861 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1862 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1863 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1864 }
1865 }
1866 else
1867 {
1868 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1869 {
1870 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1871 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1872 }
1873 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1874 {
1875 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1876 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1877 }
1878 }
1879
1880 /* Chop the high bits if 16-bit (Intel says so). */
1881 if (enmEffOpSize == IEMMODE_16BIT)
1882 offSeg &= UINT16_MAX;
1883
1884 /* Limit check. (Should alternatively check for non-canonical addresses
1885 here, but that is ruled out by offSeg being 32-bit, right?) */
1886 uint64_t u64Base;
1887 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1888 if (Desc.Legacy.Gen.u1Long)
1889 u64Base = 0;
1890 else
1891 {
1892 if (offSeg > cbLimit)
1893 {
1894 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1895 /** @todo: Intel says this is #GP(0)! */
1896 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1897 }
1898 u64Base = X86DESC_BASE(&Desc.Legacy);
1899 }
1900
1901 /*
1902 * Ok, everything checked out fine. Now set the accessed bit before
1903 * committing the result into CS, CSHID and RIP.
1904 */
1905 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1906 {
1907 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1908 if (rcStrict != VINF_SUCCESS)
1909 return rcStrict;
1910 /** @todo check what VT-x and AMD-V does. */
1911 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1912 }
1913
1914 /* commit */
1915 pVCpu->cpum.GstCtx.rip = offSeg;
1916 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1917 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1918 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1919 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1920 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1921 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1922 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1923 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1924 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1925 /** @todo check if the hidden bits are loaded correctly for 64-bit
1926 * mode. */
1927
1928 /* Flush the prefetch buffer. */
1929#ifdef IEM_WITH_CODE_TLB
1930 pVCpu->iem.s.pbInstrBuf = NULL;
1931#else
1932 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
1933#endif
1934
1935 return VINF_SUCCESS;
1936}
1937
1938
1939/**
1940 * Implements far calls.
1941 *
1942 * This is very similar to iemCImpl_FarJmp.
1943 *
1944 * @param uSel The selector.
1945 * @param offSeg The segment offset.
1946 * @param enmEffOpSize The operand size (in case we need it).
1947 */
1948IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1949{
1950 VBOXSTRICTRC rcStrict;
1951 uint64_t uNewRsp;
1952 RTPTRUNION uPtrRet;
1953
1954 /*
1955 * Real mode and V8086 mode are easy. The only snag seems to be that
1956 * CS.limit doesn't change and the limit check is done against the current
1957 * limit.
1958 */
1959 /** @todo See comment for similar code in iemCImpl_FarJmp */
1960 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1961 {
1962 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1963
1964 /* Check stack first - may #SS(0). */
1965 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1966 &uPtrRet.pv, &uNewRsp);
1967 if (rcStrict != VINF_SUCCESS)
1968 return rcStrict;
1969
1970 /* Check the target address range. */
1971 if (offSeg > UINT32_MAX)
1972 return iemRaiseGeneralProtectionFault0(pVCpu);
1973
1974 /* Everything is fine, push the return address. */
1975 if (enmEffOpSize == IEMMODE_16BIT)
1976 {
1977 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1978 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1979 }
1980 else
1981 {
1982 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1983 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
1984 }
1985 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1986 if (rcStrict != VINF_SUCCESS)
1987 return rcStrict;
1988
1989 /* Branch. */
1990 pVCpu->cpum.GstCtx.rip = offSeg;
1991 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1992 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1993 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1994 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1995 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1996 return VINF_SUCCESS;
1997 }
1998
1999 /*
2000 * Protected mode. Need to parse the specified descriptor...
2001 */
2002 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2003 {
2004 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2005 return iemRaiseGeneralProtectionFault0(pVCpu);
2006 }
2007
2008 /* Fetch the descriptor. */
2009 IEMSELDESC Desc;
2010 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2011 if (rcStrict != VINF_SUCCESS)
2012 return rcStrict;
2013
2014 /*
2015 * Deal with it according to its type. We do the standard code selectors
2016 * here and dispatch the system selectors to worker functions.
2017 */
2018 if (!Desc.Legacy.Gen.u1DescType)
2019 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2020
2021 /* Only code segments. */
2022 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2023 {
2024 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2025 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2026 }
2027
2028 /* L vs D. */
2029 if ( Desc.Legacy.Gen.u1Long
2030 && Desc.Legacy.Gen.u1DefBig
2031 && IEM_IS_LONG_MODE(pVCpu))
2032 {
2033 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2034 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2035 }
2036
2037 /* DPL/RPL/CPL check, where conforming segments make a difference. */
2038 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2039 {
2040 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2041 {
2042 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2043 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2044 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2045 }
2046 }
2047 else
2048 {
2049 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2050 {
2051 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2052 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2053 }
2054 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2055 {
2056 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2057 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2058 }
2059 }
2060
2061 /* Is it there? */
2062 if (!Desc.Legacy.Gen.u1Present)
2063 {
2064 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2065 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2066 }
2067
2068 /* Check stack first - may #SS(0). */
2069 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2070 * 16-bit code cause a two or four byte CS to be pushed? */
2071 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2072 enmEffOpSize == IEMMODE_64BIT ? 8+8
2073 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2074 &uPtrRet.pv, &uNewRsp);
2075 if (rcStrict != VINF_SUCCESS)
2076 return rcStrict;
2077
2078 /* Chop the high bits if 16-bit (Intel says so). */
2079 if (enmEffOpSize == IEMMODE_16BIT)
2080 offSeg &= UINT16_MAX;
2081
2082 /* Limit / canonical check. */
2083 uint64_t u64Base;
2084 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2085 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2086 {
2087 if (!IEM_IS_CANONICAL(offSeg))
2088 {
2089 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2090 return iemRaiseNotCanonical(pVCpu);
2091 }
2092 u64Base = 0;
2093 }
2094 else
2095 {
2096 if (offSeg > cbLimit)
2097 {
2098 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2099 /** @todo: Intel says this is #GP(0)! */
2100 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2101 }
2102 u64Base = X86DESC_BASE(&Desc.Legacy);
2103 }
2104
2105 /*
2106 * Now set the accessed bit before
2107 * writing the return address to the stack and committing the result into
2108 * CS, CSHID and RIP.
2109 */
2110 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2111 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2112 {
2113 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2114 if (rcStrict != VINF_SUCCESS)
2115 return rcStrict;
2116 /** @todo check what VT-x and AMD-V does. */
2117 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2118 }
2119
2120 /* stack */
2121 if (enmEffOpSize == IEMMODE_16BIT)
2122 {
2123 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2124 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2125 }
2126 else if (enmEffOpSize == IEMMODE_32BIT)
2127 {
2128 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2129 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2130 }
2131 else
2132 {
2133 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2134 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2135 }
2136 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2137 if (rcStrict != VINF_SUCCESS)
2138 return rcStrict;
2139
2140 /* commit */
2141 pVCpu->cpum.GstCtx.rip = offSeg;
2142 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2143 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
2144 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2145 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2146 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2147 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2148 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2149 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2150 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2151 /** @todo check if the hidden bits are loaded correctly for 64-bit
2152 * mode. */
2153
2154 /* Flush the prefetch buffer. */
2155#ifdef IEM_WITH_CODE_TLB
2156 pVCpu->iem.s.pbInstrBuf = NULL;
2157#else
2158 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2159#endif
2160 return VINF_SUCCESS;
2161}
2162
2163
2164/**
2165 * Implements retf.
2166 *
2167 * @param enmEffOpSize The effective operand size.
2168 * @param cbPop The number of bytes of arguments to pop from
2169 * the stack.
2170 */
2171IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2172{
2173 VBOXSTRICTRC rcStrict;
2174 RTCPTRUNION uPtrFrame;
2175 uint64_t uNewRsp;
2176 uint64_t uNewRip;
2177 uint16_t uNewCs;
2178 NOREF(cbInstr);
2179
2180 /*
2181 * Read the stack values first.
2182 */
2183 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2184 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
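    /* Note: the frame popped here is the return IP followed by CS, each slot padded
       to the operand size (2+2, 4+4 or 8+8 bytes); only the low word of the CS slot
       is used below. */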
2185 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2186 if (rcStrict != VINF_SUCCESS)
2187 return rcStrict;
2188 if (enmEffOpSize == IEMMODE_16BIT)
2189 {
2190 uNewRip = uPtrFrame.pu16[0];
2191 uNewCs = uPtrFrame.pu16[1];
2192 }
2193 else if (enmEffOpSize == IEMMODE_32BIT)
2194 {
2195 uNewRip = uPtrFrame.pu32[0];
2196 uNewCs = uPtrFrame.pu16[2];
2197 }
2198 else
2199 {
2200 uNewRip = uPtrFrame.pu64[0];
2201 uNewCs = uPtrFrame.pu16[4];
2202 }
2203 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2204 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2205 { /* extremely likely */ }
2206 else
2207 return rcStrict;
2208
2209 /*
2210 * Real mode and V8086 mode are easy.
2211 */
2212 /** @todo See comment for similar code in iemCImpl_FarJmp */
2213 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2214 {
2215 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2216 /** @todo check how this is supposed to work if sp=0xfffe. */
2217
2218 /* Check the limit of the new EIP. */
2219 /** @todo Intel pseudo code only does the limit check for 16-bit
2220 * operands, AMD does not make any distinction. What is right? */
2221 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2222 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2223
2224 /* commit the operation. */
2225 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2226 pVCpu->cpum.GstCtx.rip = uNewRip;
2227 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2228 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2229 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2230 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2231 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2232 if (cbPop)
2233 iemRegAddToRsp(pVCpu, cbPop);
2234 return VINF_SUCCESS;
2235 }
2236
2237 /*
2238 * Protected mode is complicated, of course.
2239 */
2240 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2241 {
2242 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2243 return iemRaiseGeneralProtectionFault0(pVCpu);
2244 }
2245
2246 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2247
2248 /* Fetch the descriptor. */
2249 IEMSELDESC DescCs;
2250 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Can only return to a code selector. */
2255 if ( !DescCs.Legacy.Gen.u1DescType
2256 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2257 {
2258 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2259 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2260 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2261 }
2262
2263 /* L vs D. */
2264 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2265 && DescCs.Legacy.Gen.u1DefBig
2266 && IEM_IS_LONG_MODE(pVCpu))
2267 {
2268 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2269 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2270 }
2271
2272 /* DPL/RPL/CPL checks. */
2273 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2274 {
2275 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2276 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2277 }
2278
2279 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2280 {
2281 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2282 {
2283 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2284 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2285 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2286 }
2287 }
2288 else
2289 {
2290 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2291 {
2292 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2293 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2294 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2295 }
2296 }
2297
2298 /* Is it there? */
2299 if (!DescCs.Legacy.Gen.u1Present)
2300 {
2301 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2302 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2303 }
2304
2305 /*
2306 * Return to outer privilege? (We'll typically have entered via a call gate.)
2307 */
2308 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2309 {
2310 /* Read the outer stack pointer stored *after* the parameters. */
2311 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop + cbRetPtr, &uPtrFrame.pv, &uNewRsp);
2312 if (rcStrict != VINF_SUCCESS)
2313 return rcStrict;
2314
2315 uPtrFrame.pu8 += cbPop; /* Skip the parameters. */
2316
2317 uint16_t uNewOuterSs;
2318 uint64_t uNewOuterRsp;
2319 if (enmEffOpSize == IEMMODE_16BIT)
2320 {
2321 uNewOuterRsp = uPtrFrame.pu16[0];
2322 uNewOuterSs = uPtrFrame.pu16[1];
2323 }
2324 else if (enmEffOpSize == IEMMODE_32BIT)
2325 {
2326 uNewOuterRsp = uPtrFrame.pu32[0];
2327 uNewOuterSs = uPtrFrame.pu16[2];
2328 }
2329 else
2330 {
2331 uNewOuterRsp = uPtrFrame.pu64[0];
2332 uNewOuterSs = uPtrFrame.pu16[4];
2333 }
2334 uPtrFrame.pu8 -= cbPop; /* Put uPtrFrame back the way it was. */
2335 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2336 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2337 { /* extremely likely */ }
2338 else
2339 return rcStrict;
2340
2341 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2342 and read the selector. */
2343 IEMSELDESC DescSs;
2344 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2345 {
2346 if ( !DescCs.Legacy.Gen.u1Long
2347 || (uNewOuterSs & X86_SEL_RPL) == 3)
2348 {
2349 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2350 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2351 return iemRaiseGeneralProtectionFault0(pVCpu);
2352 }
2353 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2354 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2355 }
2356 else
2357 {
2358 /* Fetch the descriptor for the new stack segment. */
2359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2360 if (rcStrict != VINF_SUCCESS)
2361 return rcStrict;
2362 }
2363
2364 /* Check that RPL of stack and code selectors match. */
2365 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2366 {
2367 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2368 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2369 }
2370
2371 /* Must be a writable data segment. */
2372 if ( !DescSs.Legacy.Gen.u1DescType
2373 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2374 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2375 {
2376 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2377 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2378 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2379 }
2380
2381 /* L vs D. (Not mentioned by intel.) */
2382 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2383 && DescSs.Legacy.Gen.u1DefBig
2384 && IEM_IS_LONG_MODE(pVCpu))
2385 {
2386 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2387 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2388 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2389 }
2390
2391 /* DPL/RPL/CPL checks. */
2392 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2393 {
2394 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2395 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2396 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2397 }
2398
2399 /* Is it there? */
2400 if (!DescSs.Legacy.Gen.u1Present)
2401 {
2402 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2403 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewOuterSs);
2404 }
2405
2406 /* Calc SS limit.*/
2407 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2408
2409 /* Is RIP canonical or within CS.limit? */
2410 uint64_t u64Base;
2411 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2412
2413 /** @todo Testcase: Is this correct? */
2414 if ( DescCs.Legacy.Gen.u1Long
2415 && IEM_IS_LONG_MODE(pVCpu) )
2416 {
2417 if (!IEM_IS_CANONICAL(uNewRip))
2418 {
2419 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2420 return iemRaiseNotCanonical(pVCpu);
2421 }
2422 u64Base = 0;
2423 }
2424 else
2425 {
2426 if (uNewRip > cbLimitCs)
2427 {
2428 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2429 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2430 /** @todo: Intel says this is #GP(0)! */
2431 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2432 }
2433 u64Base = X86DESC_BASE(&DescCs.Legacy);
2434 }
2435
2436 /*
2437 * Now set the accessed bit before
2438 * writing the return address to the stack and committing the result into
2439 * CS, CSHID and RIP.
2440 */
2441 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2442 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2443 {
2444 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2445 if (rcStrict != VINF_SUCCESS)
2446 return rcStrict;
2447 /** @todo check what VT-x and AMD-V does. */
2448 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2449 }
2450 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2451 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2452 {
2453 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2454 if (rcStrict != VINF_SUCCESS)
2455 return rcStrict;
2456 /** @todo check what VT-x and AMD-V does. */
2457 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2458 }
2459
2460 /* commit */
2461 if (enmEffOpSize == IEMMODE_16BIT)
2462 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2463 else
2464 pVCpu->cpum.GstCtx.rip = uNewRip;
2465 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2466 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2467 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2468 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2469 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2470 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2471 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2472 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2473 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2474 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2475 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2476 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2477 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2478 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2479 else
2480 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2481 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2482 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewOuterRsp;
2483 else
2484 pVCpu->cpum.GstCtx.rsp = uNewOuterRsp;
2485
2486 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2487 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2488 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2489 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2490 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
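    /* Note: per the SDM, a far return to an outer ring must invalidate any of
       DS/ES/FS/GS that is not accessible at the new, lower privilege level; that
       is what the adjust-for-new-CPL helper calls above are expected to do. */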
2491
2492 /** @todo check if the hidden bits are loaded correctly for 64-bit
2493 * mode. */
2494
2495 if (cbPop)
2496 iemRegAddToRsp(pVCpu, cbPop);
2497 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2498
2499 /* Done! */
2500 }
2501 /*
2502 * Return to the same privilege level
2503 */
2504 else
2505 {
2506 /* Limit / canonical check. */
2507 uint64_t u64Base;
2508 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2509
2510 /** @todo Testcase: Is this correct? */
2511 if ( DescCs.Legacy.Gen.u1Long
2512 && IEM_IS_LONG_MODE(pVCpu) )
2513 {
2514 if (!IEM_IS_CANONICAL(uNewRip))
2515 {
2516 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2517 return iemRaiseNotCanonical(pVCpu);
2518 }
2519 u64Base = 0;
2520 }
2521 else
2522 {
2523 if (uNewRip > cbLimitCs)
2524 {
2525 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2526 /** @todo: Intel says this is #GP(0)! */
2527 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2528 }
2529 u64Base = X86DESC_BASE(&DescCs.Legacy);
2530 }
2531
2532 /*
2533 * Now set the accessed bit before
2534 * writing the return address to the stack and committing the result into
2535 * CS, CSHID and RIP.
2536 */
2537 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2538 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2539 {
2540 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2541 if (rcStrict != VINF_SUCCESS)
2542 return rcStrict;
2543 /** @todo check what VT-x and AMD-V does. */
2544 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2545 }
2546
2547 /* commit */
2548 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2549 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
2550 else
2551 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2552 if (enmEffOpSize == IEMMODE_16BIT)
2553 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2554 else
2555 pVCpu->cpum.GstCtx.rip = uNewRip;
2556 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2557 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2558 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2559 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2560 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2561 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2562 /** @todo check if the hidden bits are loaded correctly for 64-bit
2563 * mode. */
2564 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2565 if (cbPop)
2566 iemRegAddToRsp(pVCpu, cbPop);
2567 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2568 }
2569
2570 /* Flush the prefetch buffer. */
2571#ifdef IEM_WITH_CODE_TLB
2572 pVCpu->iem.s.pbInstrBuf = NULL;
2573#else
2574 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2575#endif
2576 return VINF_SUCCESS;
2577}
2578
2579
2580/**
2581 * Implements retn.
2582 *
2583 * We're doing this in C because of the \#GP that might be raised if the popped
2584 * program counter is out of bounds.
2585 *
2586 * @param enmEffOpSize The effective operand size.
2587 * @param cbPop The number of bytes of arguments to pop from
2588 * the stack.
2589 */
2590IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2591{
2592 NOREF(cbInstr);
2593
2594 /* Fetch the RSP from the stack. */
2595 VBOXSTRICTRC rcStrict;
2596 RTUINT64U NewRip;
2597 RTUINT64U NewRsp;
2598 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2599
2600 switch (enmEffOpSize)
2601 {
2602 case IEMMODE_16BIT:
2603 NewRip.u = 0;
2604 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2605 break;
2606 case IEMMODE_32BIT:
2607 NewRip.u = 0;
2608 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2609 break;
2610 case IEMMODE_64BIT:
2611 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2612 break;
2613 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2614 }
2615 if (rcStrict != VINF_SUCCESS)
2616 return rcStrict;
2617
2618 /* Check the new RSP before loading it. */
2619 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2620 * of it. The canonical test is performed here and for call. */
2621 if (enmEffOpSize != IEMMODE_64BIT)
2622 {
2623 if (NewRip.DWords.dw0 > pVCpu->cpum.GstCtx.cs.u32Limit)
2624 {
2625 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2626 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2627 }
2628 }
2629 else
2630 {
2631 if (!IEM_IS_CANONICAL(NewRip.u))
2632 {
2633 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2634 return iemRaiseNotCanonical(pVCpu);
2635 }
2636 }
2637
2638 /* Apply cbPop */
2639 if (cbPop)
2640 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2641
2642 /* Commit it. */
2643 pVCpu->cpum.GstCtx.rip = NewRip.u;
2644 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2645 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2646
2647 /* Flush the prefetch buffer. */
2648#ifndef IEM_WITH_CODE_TLB
2649 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2650#endif
2651
2652 return VINF_SUCCESS;
2653}
2654
2655
2656/**
2657 * Implements enter.
2658 *
2659 * We're doing this in C because the instruction is insane; even for the
2660 * u8NestingLevel=0 case, dealing with the stack is tedious.
2661 *
2662 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the local stack frame to allocate (in bytes).
 * @param cParameters The nesting level (0..31).
2663 */
2664IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2665{
2666 /* Push RBP, saving the old value in TmpRbp. */
2667 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2668 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2669 RTUINT64U NewRbp;
2670 VBOXSTRICTRC rcStrict;
2671 if (enmEffOpSize == IEMMODE_64BIT)
2672 {
2673 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2674 NewRbp = NewRsp;
2675 }
2676 else if (enmEffOpSize == IEMMODE_32BIT)
2677 {
2678 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2679 NewRbp = NewRsp;
2680 }
2681 else
2682 {
2683 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2684 NewRbp = TmpRbp;
2685 NewRbp.Words.w0 = NewRsp.Words.w0;
2686 }
2687 if (rcStrict != VINF_SUCCESS)
2688 return rcStrict;
2689
2690 /* Copy the parameters (aka nesting levels by Intel). */
2691 cParameters &= 0x1f;
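        /* Descriptive note: when the nesting level is non-zero, cParameters values
           are copied from the old frame (read via TmpRbp) onto the new stack, and
           the new frame pointer itself is pushed last, see below. */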
2692 if (cParameters > 0)
2693 {
2694 switch (enmEffOpSize)
2695 {
2696 case IEMMODE_16BIT:
2697 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2698 TmpRbp.DWords.dw0 -= 2;
2699 else
2700 TmpRbp.Words.w0 -= 2;
2701 do
2702 {
2703 uint16_t u16Tmp;
2704 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2705 if (rcStrict != VINF_SUCCESS)
2706 break;
2707 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2708 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2709 break;
2710
2711 case IEMMODE_32BIT:
2712 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2713 TmpRbp.DWords.dw0 -= 4;
2714 else
2715 TmpRbp.Words.w0 -= 4;
2716 do
2717 {
2718 uint32_t u32Tmp;
2719 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2720 if (rcStrict != VINF_SUCCESS)
2721 break;
2722 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2723 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2724 break;
2725
2726 case IEMMODE_64BIT:
2727 TmpRbp.u -= 8;
2728 do
2729 {
2730 uint64_t u64Tmp;
2731 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2732 if (rcStrict != VINF_SUCCESS)
2733 break;
2734 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2735 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2736 break;
2737
2738 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2739 }
2740 if (rcStrict != VINF_SUCCESS)
2741 return rcStrict;
2742
2743 /* Push the new RBP */
2744 if (enmEffOpSize == IEMMODE_64BIT)
2745 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2746 else if (enmEffOpSize == IEMMODE_32BIT)
2747 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2748 else
2749 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2750 if (rcStrict != VINF_SUCCESS)
2751 return rcStrict;
2752
2753 }
2754
2755 /* Recalc RSP. */
2756 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2757
2758 /** @todo Should probe write access at the new RSP according to AMD. */
2759
2760 /* Commit it. */
2761 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2762 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2763 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2764
2765 return VINF_SUCCESS;
2766}
2767
2768
2769
2770/**
2771 * Implements leave.
2772 *
2773 * We're doing this in C because messing with the stack registers is annoying
2774 * since they depend on SS attributes.
2775 *
2776 * @param enmEffOpSize The effective operand size.
2777 */
2778IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2779{
2780 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2781 RTUINT64U NewRsp;
2782 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2783 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2784 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2785 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2786 else
2787 {
2788 /** @todo Check that LEAVE actually preserve the high EBP bits. */
2789 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2790 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2791 }
2792
2793 /* Pop RBP according to the operand size. */
2794 VBOXSTRICTRC rcStrict;
2795 RTUINT64U NewRbp;
2796 switch (enmEffOpSize)
2797 {
2798 case IEMMODE_16BIT:
2799 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2800 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2801 break;
2802 case IEMMODE_32BIT:
2803 NewRbp.u = 0;
2804 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2805 break;
2806 case IEMMODE_64BIT:
2807 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2808 break;
2809 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2810 }
2811 if (rcStrict != VINF_SUCCESS)
2812 return rcStrict;
2813
2814
2815 /* Commit it. */
2816 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2817 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2818 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2819
2820 return VINF_SUCCESS;
2821}
2822
2823
2824/**
2825 * Implements int3 and int XX.
2826 *
2827 * @param u8Int The interrupt vector number.
2828 * @param enmInt The int instruction type.
2829 */
2830IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2831{
2832 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2833 return iemRaiseXcptOrInt(pVCpu,
2834 cbInstr,
2835 u8Int,
2836 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2837 0,
2838 0);
2839}
2840
2841
2842/**
2843 * Implements iret for real mode and V8086 mode.
2844 *
2845 * @param enmEffOpSize The effective operand size.
2846 */
2847IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2848{
2849 X86EFLAGS Efl;
2850 Efl.u = IEMMISC_GET_EFL(pVCpu);
2851 NOREF(cbInstr);
2852
2853 /*
2854 * iret throws an exception if VME isn't enabled.
2855 */
2856 if ( Efl.Bits.u1VM
2857 && Efl.Bits.u2IOPL != 3
2858 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
2859 return iemRaiseGeneralProtectionFault0(pVCpu);
2860
2861 /*
2862 * Do the stack bits, but don't commit RSP before everything checks
2863 * out right.
2864 */
2865 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2866 VBOXSTRICTRC rcStrict;
2867 RTCPTRUNION uFrame;
2868 uint16_t uNewCs;
2869 uint32_t uNewEip;
2870 uint32_t uNewFlags;
2871 uint64_t uNewRsp;
2872 if (enmEffOpSize == IEMMODE_32BIT)
2873 {
2874 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
2875 if (rcStrict != VINF_SUCCESS)
2876 return rcStrict;
2877 uNewEip = uFrame.pu32[0];
2878 if (uNewEip > UINT16_MAX)
2879 return iemRaiseGeneralProtectionFault0(pVCpu);
2880
2881 uNewCs = (uint16_t)uFrame.pu32[1];
2882 uNewFlags = uFrame.pu32[2];
2883 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2884 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2885 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2886 | X86_EFL_ID;
2887 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2888 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2889 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
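        /* VM, VIF, VIP and the reserved bit 1 are not taken from the stack image
           here; they are preserved from the current EFLAGS instead. */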
2890 }
2891 else
2892 {
2893 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
2894 if (rcStrict != VINF_SUCCESS)
2895 return rcStrict;
2896 uNewEip = uFrame.pu16[0];
2897 uNewCs = uFrame.pu16[1];
2898 uNewFlags = uFrame.pu16[2];
2899 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2900 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2901 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2902 /** @todo The intel pseudo code does not indicate what happens to
2903 * reserved flags. We just ignore them. */
2904 /* Ancient CPU adjustments: See iemCImpl_popf. */
2905 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2906 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2907 }
2908 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2909 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2910 { /* extremely likely */ }
2911 else
2912 return rcStrict;
2913
2914 /** @todo Check how this is supposed to work if sp=0xfffe. */
2915 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2916 uNewCs, uNewEip, uNewFlags, uNewRsp));
2917
2918 /*
2919 * Check the limit of the new EIP.
2920 */
2921 /** @todo Only the AMD pseudo code checks the limit here; what's
2922 * right? */
2923 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
2924 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2925
2926 /*
2927 * V8086 checks and flag adjustments
2928 */
2929 if (Efl.Bits.u1VM)
2930 {
2931 if (Efl.Bits.u2IOPL == 3)
2932 {
2933 /* Preserve IOPL and clear RF. */
2934 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2935 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2936 }
2937 else if ( enmEffOpSize == IEMMODE_16BIT
2938 && ( !(uNewFlags & X86_EFL_IF)
2939 || !Efl.Bits.u1VIP )
2940 && !(uNewFlags & X86_EFL_TF) )
2941 {
2942 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2943 uNewFlags &= ~X86_EFL_VIF;
2944 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2945 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2946 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2947 }
2948 else
2949 return iemRaiseGeneralProtectionFault0(pVCpu);
2950 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2951 }
2952
2953 /*
2954 * Commit the operation.
2955 */
2956#ifdef DBGFTRACE_ENABLED
2957 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2958 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2959#endif
2960 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2961 pVCpu->cpum.GstCtx.rip = uNewEip;
2962 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2963 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2964 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2965 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2966 /** @todo do we load attribs and limit as well? */
2967 Assert(uNewFlags & X86_EFL_1);
2968 IEMMISC_SET_EFL(pVCpu, uNewFlags);
2969
2970 /* Flush the prefetch buffer. */
2971#ifdef IEM_WITH_CODE_TLB
2972 pVCpu->iem.s.pbInstrBuf = NULL;
2973#else
2974 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2975#endif
2976
2977 return VINF_SUCCESS;
2978}
2979
2980
2981/**
2982 * Loads a segment register when entering V8086 mode.
2983 *
2984 * @param pSReg The segment register.
2985 * @param uSeg The segment to load.
2986 */
2987static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2988{
2989 pSReg->Sel = uSeg;
2990 pSReg->ValidSel = uSeg;
2991 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2992 pSReg->u64Base = (uint32_t)uSeg << 4;
2993 pSReg->u32Limit = 0xffff;
2994 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2995 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2996 * IRET'ing to V8086. */
2997}
2998
2999
3000/**
3001 * Implements iret for protected mode returning to V8086 mode.
3002 *
3003 * @param uNewEip The new EIP.
3004 * @param uNewCs The new CS.
3005 * @param uNewFlags The new EFLAGS.
3006 * @param uNewRsp The RSP after the initial IRET frame.
3007 *
3008 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3009 */
3010IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
3011{
3012 RT_NOREF_PV(cbInstr);
3013 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
3014
3015 /*
3016 * Pop the V8086 specific frame bits off the stack.
3017 */
3018 VBOXSTRICTRC rcStrict;
3019 RTCPTRUNION uFrame;
3020 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 24, &uFrame.pv, &uNewRsp);
3021 if (rcStrict != VINF_SUCCESS)
3022 return rcStrict;
3023 uint32_t uNewEsp = uFrame.pu32[0];
3024 uint16_t uNewSs = uFrame.pu32[1];
3025 uint16_t uNewEs = uFrame.pu32[2];
3026 uint16_t uNewDs = uFrame.pu32[3];
3027 uint16_t uNewFs = uFrame.pu32[4];
3028 uint16_t uNewGs = uFrame.pu32[5];
3029 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3030 if (rcStrict != VINF_SUCCESS)
3031 return rcStrict;
3032
3033 /*
3034 * Commit the operation.
3035 */
3036 uNewFlags &= X86_EFL_LIVE_MASK;
3037 uNewFlags |= X86_EFL_RA1_MASK;
3038#ifdef DBGFTRACE_ENABLED
3039 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3040 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3041#endif
3042 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3043
3044 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3045 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3046 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3047 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3048 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3049 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3050 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3051 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3052 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3053 pVCpu->iem.s.uCpl = 3;
3054
3055 /* Flush the prefetch buffer. */
3056#ifdef IEM_WITH_CODE_TLB
3057 pVCpu->iem.s.pbInstrBuf = NULL;
3058#else
3059 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3060#endif
3061
3062 return VINF_SUCCESS;
3063}
3064
3065
3066/**
3067 * Implements iret for protected mode returning via a nested task.
3068 *
3069 * @param enmEffOpSize The effective operand size.
3070 */
3071IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3072{
3073 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3074#ifndef IEM_IMPLEMENTS_TASKSWITCH
3075 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3076#else
3077 RT_NOREF_PV(enmEffOpSize);
3078
3079 /*
3080 * Read the segment selector in the link-field of the current TSS.
3081 */
3082 RTSEL uSelRet;
3083 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3084 if (rcStrict != VINF_SUCCESS)
3085 return rcStrict;
3086
3087 /*
3088 * Fetch the returning task's TSS descriptor from the GDT.
3089 */
3090 if (uSelRet & X86_SEL_LDT)
3091 {
3092 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3093 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3094 }
3095
3096 IEMSELDESC TssDesc;
3097 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3098 if (rcStrict != VINF_SUCCESS)
3099 return rcStrict;
3100
3101 if (TssDesc.Legacy.Gate.u1DescType)
3102 {
3103 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3104 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3105 }
3106
3107 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3108 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3109 {
3110 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3111 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3112 }
3113
3114 if (!TssDesc.Legacy.Gate.u1Present)
3115 {
3116 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3117 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3118 }
3119
3120 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3121 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3122 0 /* uCr2 */, uSelRet, &TssDesc);
3123#endif
3124}
3125
3126
3127/**
3128 * Implements iret for protected mode.
3129 *
3130 * @param enmEffOpSize The effective operand size.
3131 */
3132IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3133{
3134 NOREF(cbInstr);
3135 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3136
3137 /*
3138 * Nested task return.
3139 */
3140 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3141 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3142
3143 /*
3144 * Normal return.
3145 *
3146 * Do the stack bits, but don't commit RSP before everything checks
3147 * out right.
3148 */
3149 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3150 VBOXSTRICTRC rcStrict;
3151 RTCPTRUNION uFrame;
3152 uint16_t uNewCs;
3153 uint32_t uNewEip;
3154 uint32_t uNewFlags;
3155 uint64_t uNewRsp;
3156 if (enmEffOpSize == IEMMODE_32BIT)
3157 {
3158 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
3159 if (rcStrict != VINF_SUCCESS)
3160 return rcStrict;
3161 uNewEip = uFrame.pu32[0];
3162 uNewCs = (uint16_t)uFrame.pu32[1];
3163 uNewFlags = uFrame.pu32[2];
3164 }
3165 else
3166 {
3167 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
3168 if (rcStrict != VINF_SUCCESS)
3169 return rcStrict;
3170 uNewEip = uFrame.pu16[0];
3171 uNewCs = uFrame.pu16[1];
3172 uNewFlags = uFrame.pu16[2];
3173 }
3174 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3175 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3176 { /* extremely likely */ }
3177 else
3178 return rcStrict;
3179 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3180
3181 /*
3182 * We're hopefully not returning to V8086 mode...
3183 */
3184 if ( (uNewFlags & X86_EFL_VM)
3185 && pVCpu->iem.s.uCpl == 0)
3186 {
3187 Assert(enmEffOpSize == IEMMODE_32BIT);
3188 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3189 }
3190
3191 /*
3192 * Protected mode.
3193 */
3194 /* Read the CS descriptor. */
3195 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3196 {
3197 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3198 return iemRaiseGeneralProtectionFault0(pVCpu);
3199 }
3200
3201 IEMSELDESC DescCS;
3202 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3203 if (rcStrict != VINF_SUCCESS)
3204 {
3205 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3206 return rcStrict;
3207 }
3208
3209 /* Must be a code descriptor. */
3210 if (!DescCS.Legacy.Gen.u1DescType)
3211 {
3212 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3213 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3214 }
3215 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3216 {
3217 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3218 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3219 }
3220
3221 /* Privilege checks. */
3222 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3223 {
3224 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3225 {
3226 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3227 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3228 }
3229 }
3230 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3231 {
3232 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3233 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3234 }
3235 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3236 {
3237 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3238 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3239 }
3240
3241 /* Present? */
3242 if (!DescCS.Legacy.Gen.u1Present)
3243 {
3244 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3245 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3246 }
3247
3248 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3249
3250 /*
3251 * Return to outer level?
3252 */
3253 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3254 {
3255 uint16_t uNewSS;
3256 uint32_t uNewESP;
3257 if (enmEffOpSize == IEMMODE_32BIT)
3258 {
3259 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &uFrame.pv, &uNewRsp);
3260 if (rcStrict != VINF_SUCCESS)
3261 return rcStrict;
3262/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3263 * 16 or 32 bits are loaded into SP turns out to depend on the D/B
3264 * bit of the popped SS selector. */
3265 uNewESP = uFrame.pu32[0];
3266 uNewSS = (uint16_t)uFrame.pu32[1];
3267 }
3268 else
3269 {
3270 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 4, &uFrame.pv, &uNewRsp);
3271 if (rcStrict != VINF_SUCCESS)
3272 return rcStrict;
3273 uNewESP = uFrame.pu16[0];
3274 uNewSS = uFrame.pu16[1];
3275 }
3276 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3277 if (rcStrict != VINF_SUCCESS)
3278 return rcStrict;
3279 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3280
3281 /* Read the SS descriptor. */
3282 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3283 {
3284 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3285 return iemRaiseGeneralProtectionFault0(pVCpu);
3286 }
3287
3288 IEMSELDESC DescSS;
3289 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3290 if (rcStrict != VINF_SUCCESS)
3291 {
3292 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3293 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3294 return rcStrict;
3295 }
3296
3297 /* Privilege checks. */
3298 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3299 {
3300 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3301 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3302 }
3303 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3304 {
3305 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3306 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3307 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3308 }
3309
3310 /* Must be a writeable data segment descriptor. */
3311 if (!DescSS.Legacy.Gen.u1DescType)
3312 {
3313 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3314 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3315 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3316 }
3317 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3318 {
3319 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3320 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3321 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3322 }
3323
3324 /* Present? */
3325 if (!DescSS.Legacy.Gen.u1Present)
3326 {
3327 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3328 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3329 }
3330
3331 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3332
3333 /* Check EIP. */
3334 if (uNewEip > cbLimitCS)
3335 {
3336 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3337 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3338 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3339 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3340 }
3341
3342 /*
3343 * Commit the changes, marking CS and SS accessed first since
3344 * that may fail.
3345 */
3346 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3347 {
3348 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3349 if (rcStrict != VINF_SUCCESS)
3350 return rcStrict;
3351 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3352 }
3353 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3354 {
3355 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3356 if (rcStrict != VINF_SUCCESS)
3357 return rcStrict;
3358 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3359 }
3360
3361 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3362 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3363 if (enmEffOpSize != IEMMODE_16BIT)
3364 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3365 if (pVCpu->iem.s.uCpl == 0)
3366 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3367 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3368 fEFlagsMask |= X86_EFL_IF;
3369 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3370 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3371 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3372 fEFlagsNew &= ~fEFlagsMask;
3373 fEFlagsNew |= uNewFlags & fEFlagsMask;
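    /* Worked example (illustration only, not part of the emulation): with
       CPL=3 and IOPL=0 the mask built above contains neither X86_EFL_IF nor
       X86_EFL_IOPL, so popping e.g. 0x3246 (IOPL=3, IF, ZF, PF set) only
       carries the status bits (ZF, PF, ...) over into fEFlagsNew; a ring-3
       guest cannot grant itself IF or raise IOPL through IRET. */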
3374#ifdef DBGFTRACE_ENABLED
3375 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3376 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3377 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3378#endif
3379
3380 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3381 pVCpu->cpum.GstCtx.rip = uNewEip;
3382 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3383 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3384 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3385 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3386 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3387 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3388 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3389
3390 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3391 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3392 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3393 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3394 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3395 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3396 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3397 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3398 else
3399 pVCpu->cpum.GstCtx.rsp = uNewESP;
3400
3401 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3402 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3403 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3404 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3405 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3406
3407 /* Done! */
3408
3409 }
3410 /*
3411 * Return to the same level.
3412 */
3413 else
3414 {
3415 /* Check EIP. */
3416 if (uNewEip > cbLimitCS)
3417 {
3418 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3419 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3420 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3421 }
3422
3423 /*
3424 * Commit the changes, marking CS first since it may fail.
3425 */
3426 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3427 {
3428 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3429 if (rcStrict != VINF_SUCCESS)
3430 return rcStrict;
3431 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3432 }
3433
3434 X86EFLAGS NewEfl;
3435 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3436 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3437 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3438 if (enmEffOpSize != IEMMODE_16BIT)
3439 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3440 if (pVCpu->iem.s.uCpl == 0)
3441 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3442 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3443 fEFlagsMask |= X86_EFL_IF;
3444 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3445 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3446 NewEfl.u &= ~fEFlagsMask;
3447 NewEfl.u |= fEFlagsMask & uNewFlags;
3448#ifdef DBGFTRACE_ENABLED
3449 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3450 pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3451 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3452#endif
3453
3454 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3455 pVCpu->cpum.GstCtx.rip = uNewEip;
3456 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3457 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3458 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3459 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3460 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3461 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3462 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3463 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3464 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3465 else
3466 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3467 /* Done! */
3468 }
3469
3470 /* Flush the prefetch buffer. */
3471#ifdef IEM_WITH_CODE_TLB
3472 pVCpu->iem.s.pbInstrBuf = NULL;
3473#else
3474 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3475#endif
3476
3477 return VINF_SUCCESS;
3478}
3479
3480
3481/**
3482 * Implements iret for long mode
3483 *
3484 * @param enmEffOpSize The effective operand size.
3485 */
3486IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3487{
3488 NOREF(cbInstr);
3489
3490 /*
3491 * Nested task return is not supported in long mode.
3492 */
3493 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3494 {
3495 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3496 return iemRaiseGeneralProtectionFault0(pVCpu);
3497 }
3498
3499 /*
3500 * Normal return.
3501 *
3502 * Do the stack bits, but don't commit RSP before everything checks
3503 * out right.
3504 */
3505 VBOXSTRICTRC rcStrict;
3506 RTCPTRUNION uFrame;
3507 uint64_t uNewRip;
3508 uint16_t uNewCs;
3509 uint16_t uNewSs;
3510 uint32_t uNewFlags;
3511 uint64_t uNewRsp;
3512 if (enmEffOpSize == IEMMODE_64BIT)
3513 {
3514 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);
3515 if (rcStrict != VINF_SUCCESS)
3516 return rcStrict;
3517 uNewRip = uFrame.pu64[0];
3518 uNewCs = (uint16_t)uFrame.pu64[1];
3519 uNewFlags = (uint32_t)uFrame.pu64[2];
3520 uNewRsp = uFrame.pu64[3];
3521 uNewSs = (uint16_t)uFrame.pu64[4];
3522 }
3523 else if (enmEffOpSize == IEMMODE_32BIT)
3524 {
3525 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);
3526 if (rcStrict != VINF_SUCCESS)
3527 return rcStrict;
3528 uNewRip = uFrame.pu32[0];
3529 uNewCs = (uint16_t)uFrame.pu32[1];
3530 uNewFlags = uFrame.pu32[2];
3531 uNewRsp = uFrame.pu32[3];
3532 uNewSs = (uint16_t)uFrame.pu32[4];
3533 }
3534 else
3535 {
3536 Assert(enmEffOpSize == IEMMODE_16BIT);
3537 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);
3538 if (rcStrict != VINF_SUCCESS)
3539 return rcStrict;
3540 uNewRip = uFrame.pu16[0];
3541 uNewCs = uFrame.pu16[1];
3542 uNewFlags = uFrame.pu16[2];
3543 uNewRsp = uFrame.pu16[3];
3544 uNewSs = uFrame.pu16[4];
3545 }
3546 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3547 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3548    { /* extremely likely */ }
3549 else
3550 return rcStrict;
3551 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
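    /* Illustrative sketch only (IRETFRAME64 is a hypothetical name for this
       comment): shape of the frame just popped for the 64-bit operand size
       case.  Unlike 32-bit protected mode, SS:RSP is always included; the
       32-bit and 16-bit operand size cases use dwords and words instead. */
#if 0
    typedef struct IRETFRAME64
    {
        uint64_t uRip;      /* [0] new RIP */
        uint64_t uCs;       /* [1] new CS, only the low 16 bits are used */
        uint64_t uRflags;   /* [2] new RFLAGS, upper 32 bits reserved */
        uint64_t uRsp;      /* [3] new RSP */
        uint64_t uSs;       /* [4] new SS, only the low 16 bits are used */
    } IRETFRAME64;
#endif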
3552
3553 /*
3554 * Check stuff.
3555 */
3556 /* Read the CS descriptor. */
3557 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3558 {
3559 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3560 return iemRaiseGeneralProtectionFault0(pVCpu);
3561 }
3562
3563 IEMSELDESC DescCS;
3564 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3565 if (rcStrict != VINF_SUCCESS)
3566 {
3567 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3568 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3569 return rcStrict;
3570 }
3571
3572 /* Must be a code descriptor. */
3573 if ( !DescCS.Legacy.Gen.u1DescType
3574 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3575 {
3576        Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment (T=%u, type=%#x) -> #GP\n",
3577 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3578 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3579 }
3580
3581 /* Privilege checks. */
3582 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3583 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3584 {
3585 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3586 {
3587 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3588 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3589 }
3590 }
3591 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3592 {
3593 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3594 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3595 }
3596 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3597 {
3598 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3599 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3600 }
3601
3602 /* Present? */
3603 if (!DescCS.Legacy.Gen.u1Present)
3604 {
3605 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3606 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3607 }
3608
3609 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3610
3611 /* Read the SS descriptor. */
3612 IEMSELDESC DescSS;
3613 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3614 {
3615 if ( !DescCS.Legacy.Gen.u1Long
3616 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3617 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3618 {
3619 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3620 return iemRaiseGeneralProtectionFault0(pVCpu);
3621 }
3622 DescSS.Legacy.u = 0;
3623 }
3624 else
3625 {
3626 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3627 if (rcStrict != VINF_SUCCESS)
3628 {
3629 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3630 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3631 return rcStrict;
3632 }
3633 }
3634
3635 /* Privilege checks. */
3636 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3637 {
3638 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3639 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3640 }
3641
3642 uint32_t cbLimitSs;
3643 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3644 cbLimitSs = UINT32_MAX;
3645 else
3646 {
3647 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3648 {
3649 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3650 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3651 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3652 }
3653
3654 /* Must be a writeable data segment descriptor. */
3655 if (!DescSS.Legacy.Gen.u1DescType)
3656 {
3657 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3658 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3659 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3660 }
3661 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3662 {
3663 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3664 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3665 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3666 }
3667
3668 /* Present? */
3669 if (!DescSS.Legacy.Gen.u1Present)
3670 {
3671 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3672 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3673 }
3674 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3675 }
3676
3677 /* Check EIP. */
3678 if (DescCS.Legacy.Gen.u1Long)
3679 {
3680 if (!IEM_IS_CANONICAL(uNewRip))
3681 {
3682 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3683 uNewCs, uNewRip, uNewSs, uNewRsp));
3684 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3685 }
3686 }
3687 else
3688 {
3689 if (uNewRip > cbLimitCS)
3690 {
3691 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3692 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3693 /** @todo: Which is it, #GP(0) or #GP(sel)? */
3694 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3695 }
3696 }
3697
3698 /*
3699 * Commit the changes, marking CS and SS accessed first since
3700 * that may fail.
3701 */
3702 /** @todo where exactly are these actually marked accessed by a real CPU? */
3703 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3704 {
3705 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3706 if (rcStrict != VINF_SUCCESS)
3707 return rcStrict;
3708 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3709 }
3710 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3711 {
3712 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3713 if (rcStrict != VINF_SUCCESS)
3714 return rcStrict;
3715 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3716 }
3717
3718 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3719 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3720 if (enmEffOpSize != IEMMODE_16BIT)
3721 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3722 if (pVCpu->iem.s.uCpl == 0)
3723 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3724 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3725 fEFlagsMask |= X86_EFL_IF;
3726 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3727 fEFlagsNew &= ~fEFlagsMask;
3728 fEFlagsNew |= uNewFlags & fEFlagsMask;
3729#ifdef DBGFTRACE_ENABLED
3730 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3731 pVCpu->iem.s.uCpl, uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3732#endif
3733
3734 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3735 pVCpu->cpum.GstCtx.rip = uNewRip;
3736 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3737 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3738 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3739 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3740 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3741 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3742 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3743 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3744 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3745 else
3746 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3747 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3748 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3749 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3750 {
3751 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3752 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3753 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3754 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3755 Log2(("iretq new SS: NULL\n"));
3756 }
3757 else
3758 {
3759 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3760 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3761 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3762 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3763 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3764 }
3765
3766 if (pVCpu->iem.s.uCpl != uNewCpl)
3767 {
3768 pVCpu->iem.s.uCpl = uNewCpl;
3769 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3770 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3771 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3772 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3773 }
3774
3775 /* Flush the prefetch buffer. */
3776#ifdef IEM_WITH_CODE_TLB
3777 pVCpu->iem.s.pbInstrBuf = NULL;
3778#else
3779 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3780#endif
3781
3782 return VINF_SUCCESS;
3783}
3784
3785
3786/**
3787 * Implements iret.
3788 *
3789 * @param enmEffOpSize The effective operand size.
3790 */
3791IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3792{
3793 bool fBlockingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3794
3795#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3796 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3797 {
3798 /*
3799 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3800 * of this IRET instruction. We need to provide this information as part of some
3801 * VM-exits.
3802 *
3803 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3804 */
3805 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3806 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3807 else
3808 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3809
3810 /*
3811 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3812 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3813 */
3814 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3815 fBlockingNmi = false;
3816
3817 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3818 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3819 }
3820#endif
3821
3822 /*
3823     * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3824 * The NMI is still held pending (which I assume means blocking of further NMIs
3825 * is in effect).
3826 *
3827 * See AMD spec. 15.9 "Instruction Intercepts".
3828 * See AMD spec. 15.21.9 "NMI Support".
3829 */
3830 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3831 {
3832 Log(("iret: Guest intercept -> #VMEXIT\n"));
3833 IEM_SVM_UPDATE_NRIP(pVCpu);
3834 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3835 }
3836
3837 /*
3838 * Clear NMI blocking, if any, before causing any further exceptions.
3839 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3840 */
3841 if (fBlockingNmi)
3842 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
3843
3844 /*
3845 * Call a mode specific worker.
3846 */
3847 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3848 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3849 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3850 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3851 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3852 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3853}
3854
3855
3856static void iemLoadallSetSelector(PVMCPU pVCpu, uint8_t iSegReg, uint16_t uSel)
3857{
3858 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3859
3860 pHid->Sel = uSel;
3861 pHid->ValidSel = uSel;
3862 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3863}
3864
3865
3866static void iemLoadall286SetDescCache(PVMCPU pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
3867{
3868 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3869
3870 /* The base is in the first three bytes. */
3871 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
3872 /* The attributes are in the fourth byte. */
3873 pHid->Attr.u = pbMem[3];
3874 /* The limit is in the last two bytes. */
3875 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
3876}
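/* Illustrative sketch only (LOADALL286DESCCACHE is a hypothetical name for
   this comment): byte layout of the 6-byte descriptor cache entry decoded by
   iemLoadall286SetDescCache above. */
#if 0
typedef struct LOADALL286DESCCACHE
{
    uint8_t abBase[3];      /* 24-bit base address, little endian */
    uint8_t bAttr;          /* access rights byte */
    uint8_t abLimit[2];     /* 16-bit limit, little endian */
} LOADALL286DESCCACHE;
#endif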
3877
3878
3879/**
3880 * Implements 286 LOADALL (286 CPUs only).
3881 */
3882IEM_CIMPL_DEF_0(iemCImpl_loadall286)
3883{
3884 NOREF(cbInstr);
3885
3886 /* Data is loaded from a buffer at 800h. No checks are done on the
3887 * validity of loaded state.
3888 *
3889 * LOADALL only loads the internal CPU state, it does not access any
3890 * GDT, LDT, or similar tables.
3891 */
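    /* Rough map of the 0x66 byte save area at 000800h as it is consumed
       below (offsets taken from the code in this function, not from an
       external reference):
         0x06  MSW                         0x26  DI,SI,BP,SP,BX,DX,CX,AX
         0x16  TR selector                 0x36  ES/CS/SS/DS descriptor caches
         0x18  FLAGS                             (6 bytes each)
         0x1A  IP                          0x4E  GDTR base+limit
         0x1C  LDTR selector               0x5A  IDTR base+limit
         0x1E  DS,SS,CS,ES selectors                                        */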
3892
3893 if (pVCpu->iem.s.uCpl != 0)
3894 {
3895 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
3896 return iemRaiseGeneralProtectionFault0(pVCpu);
3897 }
3898
3899 uint8_t const *pbMem = NULL;
3900 uint16_t const *pa16Mem;
3901 uint8_t const *pa8Mem;
3902 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
3903 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R);
3904 if (rcStrict != VINF_SUCCESS)
3905 return rcStrict;
3906
3907 /* The MSW is at offset 0x06. */
3908 pa16Mem = (uint16_t const *)(pbMem + 0x06);
3909 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
3910 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3911 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3912 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
3913
3914 CPUMSetGuestCR0(pVCpu, uNewCr0);
3915 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
3916
3917 /* Inform PGM if mode changed. */
3918 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
3919 {
3920 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
3921 AssertRCReturn(rc, rc);
3922 /* ignore informational status codes */
3923 }
3924 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
3925
3926 /* TR selector is at offset 0x16. */
3927 pa16Mem = (uint16_t const *)(pbMem + 0x16);
3928 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0];
3929 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
3930 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3931
3932 /* Followed by FLAGS... */
3933 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
3934 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */
3935
3936 /* LDT is at offset 0x1C. */
3937 pa16Mem = (uint16_t const *)(pbMem + 0x1C);
3938 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0];
3939 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
3940 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3941
3942 /* Segment registers are at offset 0x1E. */
3943 pa16Mem = (uint16_t const *)(pbMem + 0x1E);
3944 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
3945 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
3946 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
3947 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
3948
3949 /* GPRs are at offset 0x26. */
3950 pa16Mem = (uint16_t const *)(pbMem + 0x26);
3951 pVCpu->cpum.GstCtx.di = pa16Mem[0];
3952 pVCpu->cpum.GstCtx.si = pa16Mem[1];
3953 pVCpu->cpum.GstCtx.bp = pa16Mem[2];
3954 pVCpu->cpum.GstCtx.sp = pa16Mem[3];
3955 pVCpu->cpum.GstCtx.bx = pa16Mem[4];
3956 pVCpu->cpum.GstCtx.dx = pa16Mem[5];
3957 pVCpu->cpum.GstCtx.cx = pa16Mem[6];
3958 pVCpu->cpum.GstCtx.ax = pa16Mem[7];
3959
3960 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
3961 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
3962 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
3963 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
3964 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
3965
3966 /* GDTR contents are at offset 0x4E, 6 bytes. */
3967 RTGCPHYS GCPtrBase;
3968 uint16_t cbLimit;
3969 pa8Mem = pbMem + 0x4E;
3970 /* NB: Fourth byte "should be zero"; we are ignoring it. */
3971 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
3972 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
3973 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
3974
3975 /* IDTR contents are at offset 0x5A, 6 bytes. */
3976 pa8Mem = pbMem + 0x5A;
3977 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
3978 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
3979 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
3980
3981 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
3982 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
3983 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
3984 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
3985 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3986    Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
3987
3988 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
3989 if (rcStrict != VINF_SUCCESS)
3990 return rcStrict;
3991
3992 /* The CPL may change. It is taken from the "DPL fields of the SS and CS
3993 * descriptor caches" but there is no word as to what happens if those are
3994 * not identical (probably bad things).
3995 */
3996 pVCpu->iem.s.uCpl = pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl;
3997
3998 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
3999
4000 /* Flush the prefetch buffer. */
4001#ifdef IEM_WITH_CODE_TLB
4002 pVCpu->iem.s.pbInstrBuf = NULL;
4003#else
4004 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4005#endif
4006 return rcStrict;
4007}
4008
4009
4010/**
4011 * Implements SYSCALL (AMD and Intel64).
4014 */
4015IEM_CIMPL_DEF_0(iemCImpl_syscall)
4016{
4017 /** @todo hack, LOADALL should be decoded as such on a 286. */
4018 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
4019 return iemCImpl_loadall286(pVCpu, cbInstr);
4020
4021 /*
4022 * Check preconditions.
4023 *
4024 * Note that CPUs described in the documentation may load a few odd values
4025 * into CS and SS other than the ones we allow here. This has yet to be checked on real
4026 * hardware.
4027 */
4028 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4029 {
4030 Log(("syscall: Not enabled in EFER -> #UD\n"));
4031 return iemRaiseUndefinedOpcode(pVCpu);
4032 }
4033 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4034 {
4035 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4036 return iemRaiseGeneralProtectionFault0(pVCpu);
4037 }
4038 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4039 {
4040 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4041 return iemRaiseUndefinedOpcode(pVCpu);
4042 }
4043
4044 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4045
4046 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4047 /** @todo what about LDT selectors? Shouldn't matter, really. */
4048 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4049 uint16_t uNewSs = uNewCs + 8;
4050 if (uNewCs == 0 || uNewSs == 0)
4051 {
4052 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4053 return iemRaiseGeneralProtectionFault0(pVCpu);
4054 }
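    /* Worked example (illustration only): with MSR_STAR bits 47:32 = 0x0008
       the code above yields uNewCs = 0x0008 and uNewSs = 0x0010, i.e. the SS
       descriptor is assumed to sit in the GDT slot immediately after CS. */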
4055
4056 /* Long mode and legacy mode differs. */
4057 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4058 {
4059        uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx.msrCSTAR;
4060
4061 /* This test isn't in the docs, but I'm not trusting the guys writing
4062 the MSRs to have validated the values as canonical like they should. */
4063 if (!IEM_IS_CANONICAL(uNewRip))
4064 {
4065            Log(("syscall: New RIP (%RX64) is not canonical -> #UD\n", uNewRip));
4066 return iemRaiseUndefinedOpcode(pVCpu);
4067 }
4068
4069 /*
4070 * Commit it.
4071 */
4072        Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4073 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4074 pVCpu->cpum.GstCtx.rip = uNewRip;
4075
4076 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4077 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4078 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4079 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4080
4081 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4082 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4083 }
4084 else
4085 {
4086 /*
4087 * Commit it.
4088 */
4089 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
4090             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4091 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4092 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4093 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4094
4095 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4096 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4097 }
4098 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4099 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4100 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4101 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4102 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4103
4104 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4105 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4106 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4107 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4108 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4109
4110 /* Flush the prefetch buffer. */
4111#ifdef IEM_WITH_CODE_TLB
4112 pVCpu->iem.s.pbInstrBuf = NULL;
4113#else
4114 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4115#endif
4116
4117 return VINF_SUCCESS;
4118}
4119
4120
4121/**
4122 * Implements SYSRET (AMD and Intel64).
4123 */
4124IEM_CIMPL_DEF_0(iemCImpl_sysret)
4126{
4127 RT_NOREF_PV(cbInstr);
4128
4129 /*
4130 * Check preconditions.
4131 *
4132 * Note that CPUs described in the documentation may load a few odd values
4133 * into CS and SS other than the ones we allow here. This has yet to be checked on real
4134 * hardware.
4135 */
4136 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4137 {
4138 Log(("sysret: Not enabled in EFER -> #UD\n"));
4139 return iemRaiseUndefinedOpcode(pVCpu);
4140 }
4141 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4142 {
4143 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4144 return iemRaiseUndefinedOpcode(pVCpu);
4145 }
4146 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4147 {
4148 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4149 return iemRaiseGeneralProtectionFault0(pVCpu);
4150 }
4151 if (pVCpu->iem.s.uCpl != 0)
4152 {
4153 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4154 return iemRaiseGeneralProtectionFault0(pVCpu);
4155 }
4156
4157 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4158
4159 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4160 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4161 uint16_t uNewSs = uNewCs + 8;
4162 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4163 uNewCs += 16;
4164 if (uNewCs == 0 || uNewSs == 0)
4165 {
4166 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4167 return iemRaiseGeneralProtectionFault0(pVCpu);
4168 }
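    /* Worked example (illustration only): with MSR_STAR bits 63:48 = 0x0010
       the code above yields CS = 0x0010 / SS = 0x0018 for a 32-bit return and
       CS = 0x0020 / SS = 0x0018 for a 64-bit return (CS + 16, SS unchanged),
       before the RPL=3 bits are ORed in further down. */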
4169
4170 /*
4171 * Commit it.
4172 */
4173 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4174 {
4175 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4176 {
4177 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
4178              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4179            /* Note! We disregard the intel manual regarding the RCX canonical
4180 check, ask intel+xen why AMD doesn't do it. */
4181 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4182 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4183 | (3 << X86DESCATTR_DPL_SHIFT);
4184 }
4185 else
4186 {
4187 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
4188              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4189 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4190 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4191 | (3 << X86DESCATTR_DPL_SHIFT);
4192 }
4193 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4194 * what it really ignores. RF and VM are hinted at being zero, by AMD. */
4195 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4196 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4197 }
4198 else
4199 {
4200        Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4201 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4202 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4203 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4204 | (3 << X86DESCATTR_DPL_SHIFT);
4205 }
4206 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4207 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4208 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4209 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4210 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4211
4212 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4213 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4214 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4215    /* The SS hidden bits remain unchanged says AMD. To that I say "Yeah, right!". */
4216 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4217 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4218 * on sysret. */
4219
4220 /* Flush the prefetch buffer. */
4221#ifdef IEM_WITH_CODE_TLB
4222 pVCpu->iem.s.pbInstrBuf = NULL;
4223#else
4224 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4225#endif
4226
4227 return VINF_SUCCESS;
4228}
4229
4230
4231/**
4232 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4233 *
4234 * @param iSegReg The segment register number (valid).
4235 * @param uSel The new selector value.
4236 */
4237IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
4238{
4239 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4240 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4241 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4242
4243 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4244
4245 /*
4246 * Real mode and V8086 mode are easy.
4247 */
4248 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4249 {
4250 *pSel = uSel;
4251 pHid->u64Base = (uint32_t)uSel << 4;
4252 pHid->ValidSel = uSel;
4253 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4254#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4255 /** @todo Does the CPU actually load limits and attributes in the
4256 * real/V8086 mode segment load case? It doesn't for CS in far
4257 * jumps... Affects unreal mode. */
4258 pHid->u32Limit = 0xffff;
4259 pHid->Attr.u = 0;
4260 pHid->Attr.n.u1Present = 1;
4261 pHid->Attr.n.u1DescType = 1;
4262 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4263 ? X86_SEL_TYPE_RW
4264 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4265#endif
4266 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4267 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4268 return VINF_SUCCESS;
4269 }
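    /* Worked example (illustration only): loading DS with 0x1234 in real or
       V8086 mode above sets the selector to 0x1234 and the hidden base to
       0x12340 (selector << 4); limit and attributes are left untouched, see
       the #if 0 block and @todo above. */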
4270
4271 /*
4272 * Protected mode.
4273 *
4274 * Check if it's a null segment selector value first, that's OK for DS, ES,
4275 * FS and GS. If not null, then we have to load and parse the descriptor.
4276 */
4277 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4278 {
4279 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4280 if (iSegReg == X86_SREG_SS)
4281 {
4282 /* In 64-bit kernel mode, the stack can be 0 because of the way
4283               interrupts are dispatched. AMD seems to have a slightly more
4284 relaxed relationship to SS.RPL than intel does. */
4285 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4286 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4287 || pVCpu->iem.s.uCpl > 2
4288 || ( uSel != pVCpu->iem.s.uCpl
4289 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4290 {
4291 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4292 return iemRaiseGeneralProtectionFault0(pVCpu);
4293 }
4294 }
4295
4296 *pSel = uSel; /* Not RPL, remember :-) */
4297 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4298 if (iSegReg == X86_SREG_SS)
4299 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4300
4301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4302 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4303
4304 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4305 return VINF_SUCCESS;
4306 }
4307
4308 /* Fetch the descriptor. */
4309 IEMSELDESC Desc;
4310 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4311 if (rcStrict != VINF_SUCCESS)
4312 return rcStrict;
4313
4314 /* Check GPs first. */
4315 if (!Desc.Legacy.Gen.u1DescType)
4316 {
4317 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4318 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4319 }
4320 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4321 {
4322 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4323 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4324 {
4325 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4326 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4327 }
4328 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4329 {
4330 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4331 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4332 }
4333 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4334 {
4335 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4336 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4337 }
4338 }
4339 else
4340 {
4341 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4342 {
4343 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4344 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4345 }
4346 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4347 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4348 {
4349#if 0 /* this is what intel says. */
4350 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4351 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4352 {
4353 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4354 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4355 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4356 }
4357#else /* this is what makes more sense. */
4358 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4359 {
4360 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4361 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4362 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4363 }
4364 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4365 {
4366 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4367 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4368 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4369 }
4370#endif
4371 }
4372 }
4373
4374 /* Is it there? */
4375 if (!Desc.Legacy.Gen.u1Present)
4376 {
4377 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4378 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4379 }
4380
4381 /* The base and limit. */
4382 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4383 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4384
4385 /*
4386 * Ok, everything checked out fine. Now set the accessed bit before
4387 * committing the result into the registers.
4388 */
4389 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4390 {
4391 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4392 if (rcStrict != VINF_SUCCESS)
4393 return rcStrict;
4394 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4395 }
4396
4397 /* commit */
4398 *pSel = uSel;
4399 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4400 pHid->u32Limit = cbLimit;
4401 pHid->u64Base = u64Base;
4402 pHid->ValidSel = uSel;
4403 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4404
4405 /** @todo check if the hidden bits are loaded correctly for 64-bit
4406 * mode. */
4407 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4408
4409 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4410 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4411 return VINF_SUCCESS;
4412}
4413
4414
4415/**
4416 * Implements 'mov SReg, r/m'.
4417 *
4418 * @param iSegReg The segment register number (valid).
4419 * @param uSel The new selector value.
4420 */
4421IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4422{
4423 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4424 if (rcStrict == VINF_SUCCESS)
4425 {
4426 if (iSegReg == X86_SREG_SS)
4427 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
4428 }
4429 return rcStrict;
4430}
4431
4432
4433/**
4434 * Implements 'pop SReg'.
4435 *
4436 * @param iSegReg The segment register number (valid).
4437 * @param enmEffOpSize The efficient operand size (valid).
4438 */
4439IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4440{
4441 VBOXSTRICTRC rcStrict;
4442
4443 /*
4444 * Read the selector off the stack and join paths with mov ss, reg.
4445 */
4446 RTUINT64U TmpRsp;
4447 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4448 switch (enmEffOpSize)
4449 {
4450 case IEMMODE_16BIT:
4451 {
4452 uint16_t uSel;
4453 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4454 if (rcStrict == VINF_SUCCESS)
4455 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4456 break;
4457 }
4458
4459 case IEMMODE_32BIT:
4460 {
4461 uint32_t u32Value;
4462 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4463 if (rcStrict == VINF_SUCCESS)
4464 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
4465 break;
4466 }
4467
4468 case IEMMODE_64BIT:
4469 {
4470 uint64_t u64Value;
4471 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4472 if (rcStrict == VINF_SUCCESS)
4473 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
4474 break;
4475 }
4476 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4477 }
4478
4479 /*
4480 * Commit the stack on success.
4481 */
4482 if (rcStrict == VINF_SUCCESS)
4483 {
4484 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4485 if (iSegReg == X86_SREG_SS)
4486 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
4487 }
4488 return rcStrict;
4489}
4490
4491
4492/**
4493 * Implements lgs, lfs, les, lds & lss.
4494 */
4495IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
4496 uint16_t, uSel,
4497 uint64_t, offSeg,
4498 uint8_t, iSegReg,
4499 uint8_t, iGReg,
4500 IEMMODE, enmEffOpSize)
4501{
4502 /*
4503 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
4504 */
4505    /** @todo verify and test that mov, pop and lXs perform the segment
4506 * register loading in the exact same way. */
4507 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
4508 if (rcStrict == VINF_SUCCESS)
4509 {
4510 switch (enmEffOpSize)
4511 {
4512 case IEMMODE_16BIT:
4513 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4514 break;
4515 case IEMMODE_32BIT:
4516 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4517 break;
4518 case IEMMODE_64BIT:
4519 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4520 break;
4521 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4522 }
4523 }
4524
4525 return rcStrict;
4526}
4527
4528
4529/**
4530 * Helper for VERR, VERW, LAR, and LSL; fetches the descriptor for the given selector.
4531 *
4532 * @retval VINF_SUCCESS on success.
4533 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4534 * @retval iemMemFetchSysU64 return value.
4535 *
4536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4537 * @param uSel The selector value.
4538 * @param fAllowSysDesc Whether system descriptors are OK or not.
4539 * @param pDesc Where to return the descriptor on success.
4540 */
4541static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPU pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4542{
4543 pDesc->Long.au64[0] = 0;
4544 pDesc->Long.au64[1] = 0;
4545
4546 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4547 return VINF_IEM_SELECTOR_NOT_OK;
4548
4549 /* Within the table limits? */
4550 RTGCPTR GCPtrBase;
4551 if (uSel & X86_SEL_LDT)
4552 {
4553 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4554 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4555 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4556 return VINF_IEM_SELECTOR_NOT_OK;
4557 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4558 }
4559 else
4560 {
4561 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4562 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4563 return VINF_IEM_SELECTOR_NOT_OK;
4564 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
4565 }
4566
4567 /* Fetch the descriptor. */
4568 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571 if (!pDesc->Legacy.Gen.u1DescType)
4572 {
4573 if (!fAllowSysDesc)
4574 return VINF_IEM_SELECTOR_NOT_OK;
4575 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4576 {
4577 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4578 if (rcStrict != VINF_SUCCESS)
4579 return rcStrict;
4580 }
4581
4582 }
4583
4584 return VINF_SUCCESS;
4585}
4586
4587
4588/**
4589 * Implements verr (fWrite = false) and verw (fWrite = true).
4590 */
4591IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4592{
4593 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4594
4595    /** @todo figure out whether the accessed bit is set or not. */
4596
4597 bool fAccessible = true;
4598 IEMSELDESC Desc;
4599 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4600 if (rcStrict == VINF_SUCCESS)
4601 {
4602 /* Check the descriptor, order doesn't matter much here. */
4603 if ( !Desc.Legacy.Gen.u1DescType
4604 || !Desc.Legacy.Gen.u1Present)
4605 fAccessible = false;
4606 else
4607 {
4608 if ( fWrite
4609 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4610 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4611 fAccessible = false;
4612
4613 /** @todo testcase for the conforming behavior. */
4614 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4615 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4616 {
4617 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4618 fAccessible = false;
4619 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4620 fAccessible = false;
4621 }
4622 }
4623
4624 }
4625 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4626 fAccessible = false;
4627 else
4628 return rcStrict;
4629
4630 /* commit */
4631 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
4632
4633 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4634 return VINF_SUCCESS;
4635}
4636
4637
4638/**
4639 * Implements LAR and LSL with 64-bit operand size.
4640 *
4641 * @returns VINF_SUCCESS.
4642 * @param pu64Dst Pointer to the destination register.
4643 * @param uSel The selector to load details for.
4644 * @param fIsLar true = LAR, false = LSL.
4645 */
4646IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4647{
4648 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4649
4650    /** @todo figure out whether the accessed bit is set or not. */
4651
4652 bool fDescOk = true;
4653 IEMSELDESC Desc;
4654 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
4655 if (rcStrict == VINF_SUCCESS)
4656 {
4657 /*
4658 * Check the descriptor type.
4659 */
4660 if (!Desc.Legacy.Gen.u1DescType)
4661 {
4662 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4663 {
4664 if (Desc.Long.Gen.u5Zeros)
4665 fDescOk = false;
4666 else
4667 switch (Desc.Long.Gen.u4Type)
4668 {
4669 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4670 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4671 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4672 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4673 break;
4674 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4675 fDescOk = fIsLar;
4676 break;
4677 default:
4678 fDescOk = false;
4679 break;
4680 }
4681 }
4682 else
4683 {
4684 switch (Desc.Long.Gen.u4Type)
4685 {
4686 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4687 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4688 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4689 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4690 case X86_SEL_TYPE_SYS_LDT:
4691 break;
4692 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4693 case X86_SEL_TYPE_SYS_TASK_GATE:
4694 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4695 fDescOk = fIsLar;
4696 break;
4697 default:
4698 fDescOk = false;
4699 break;
4700 }
4701 }
4702 }
4703 if (fDescOk)
4704 {
4705 /*
4706             * Check the RPL/DPL/CPL interaction.
4707 */
4708 /** @todo testcase for the conforming behavior. */
4709 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4710 || !Desc.Legacy.Gen.u1DescType)
4711 {
4712 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4713 fDescOk = false;
4714 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4715 fDescOk = false;
4716 }
4717 }
4718
4719 if (fDescOk)
4720 {
4721 /*
4722 * All fine, start committing the result.
4723 */
4724 if (fIsLar)
4725 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4726 else
4727 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4728 }
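        /* Worked example (illustration only): for a flat 32-bit ring-0 code
           descriptor 0x00CF9A000000FFFF, LAR stores au32[1] & 0x00ffff00 =
           0x00CF9A00 (access rights plus limit/flag nibbles, base bits masked
           off), while LSL stores the expanded limit 0xFFFFFFFF. */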
4729
4730 }
4731 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4732 fDescOk = false;
4733 else
4734 return rcStrict;
4735
4736 /* commit flags value and advance rip. */
4737 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
4738 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4739
4740 return VINF_SUCCESS;
4741}
4742
4743
4744/**
4745 * Implements LAR and LSL with 16-bit operand size.
4746 *
4747 * @returns VINF_SUCCESS.
4748 * @param pu16Dst Pointer to the destination register.
4749 * @param uSel The selector to load details for.
4750 * @param fIsLar true = LAR, false = LSL.
4751 */
4752IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
4753{
4754 uint64_t u64TmpDst = *pu16Dst;
4755 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
4756 *pu16Dst = u64TmpDst;
4757 return VINF_SUCCESS;
4758}
4759
4760
4761/**
4762 * Implements lgdt.
4763 *
4764 * @param iEffSeg The segment of the new gdtr contents
4765 * @param GCPtrEffSrc The address of the new gdtr contents.
4766 * @param enmEffOpSize The effective operand size.
4767 */
4768IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4769{
4770 if (pVCpu->iem.s.uCpl != 0)
4771 return iemRaiseGeneralProtectionFault0(pVCpu);
4772 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
4773
4774 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
4775 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
4776 {
4777 Log(("lgdt: Guest intercept -> VM-exit\n"));
4778 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
4779 }
4780
4781 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
4782 {
4783 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
4784 IEM_SVM_UPDATE_NRIP(pVCpu);
4785 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4786 }
4787
4788 /*
4789 * Fetch the limit and base address.
4790 */
4791 uint16_t cbLimit;
4792 RTGCPTR GCPtrBase;
4793 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4794 if (rcStrict == VINF_SUCCESS)
4795 {
4796 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4797 || X86_IS_CANONICAL(GCPtrBase))
4798 {
4799 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4800 if (rcStrict == VINF_SUCCESS)
4801 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4802 }
4803 else
4804 {
4805 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4806 return iemRaiseGeneralProtectionFault0(pVCpu);
4807 }
4808 }
4809 return rcStrict;
4810}
4811
4812
4813/**
4814 * Implements sgdt.
4815 *
4816 * @param iEffSeg The segment where to store the gdtr content.
4817 * @param GCPtrEffDst The address where to store the gdtr content.
4818 */
4819IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4820{
4821 /*
4822 * Join paths with sidt.
4823 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4824 * you really must know.
4825 */
4826 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
4827 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
4828 {
4829 Log(("sgdt: Guest intercept -> VM-exit\n"));
4830 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
4831 }
4832
4833 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
4834 {
4835 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
4836 IEM_SVM_UPDATE_NRIP(pVCpu);
4837 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4838 }
4839
4840 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4841 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
4842 if (rcStrict == VINF_SUCCESS)
4843 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4844 return rcStrict;
4845}
4846
4847
4848/**
4849 * Implements lidt.
4850 *
4851 * @param iEffSeg The segment of the new idtr contents.
4852 * @param GCPtrEffSrc The address of the new idtr contents.
4853 * @param enmEffOpSize The effective operand size.
4854 */
4855IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
4856{
4857 if (pVCpu->iem.s.uCpl != 0)
4858 return iemRaiseGeneralProtectionFault0(pVCpu);
4859 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
4860
4861 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
4862 {
4863 Log(("lidt: Guest intercept -> #VMEXIT\n"));
4864 IEM_SVM_UPDATE_NRIP(pVCpu);
4865 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4866 }
4867
4868 /*
4869 * Fetch the limit and base address.
4870 */
4871 uint16_t cbLimit;
4872 RTGCPTR GCPtrBase;
4873 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
4874 if (rcStrict == VINF_SUCCESS)
4875 {
4876 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4877 || X86_IS_CANONICAL(GCPtrBase))
4878 {
4879 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4880 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4881 }
4882 else
4883 {
4884 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
4885 return iemRaiseGeneralProtectionFault0(pVCpu);
4886 }
4887 }
4888 return rcStrict;
4889}
4890
4891
4892/**
4893 * Implements sidt.
4894 *
4895 * @param iEffSeg The segment where to store the idtr content.
4896 * @param GCPtrEffDst The address where to store the idtr content.
4897 */
4898IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4899{
4900 /*
4901 * Join paths with sgdt.
4902 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
4903 * you really must know.
4904 */
4905 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
4906 {
4907 Log(("sidt: Guest intercept -> #VMEXIT\n"));
4908 IEM_SVM_UPDATE_NRIP(pVCpu);
4909 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4910 }
4911
4912 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
4913 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
4914 if (rcStrict == VINF_SUCCESS)
4915 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4916 return rcStrict;
4917}
4918
4919
4920/**
4921 * Implements lldt.
4922 *
4923 * @param uNewLdt The new LDT selector value.
4924 */
4925IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
4926{
4927 /*
4928 * Check preconditions.
4929 */
4930 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4931 {
4932 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
4933 return iemRaiseUndefinedOpcode(pVCpu);
4934 }
4935 if (pVCpu->iem.s.uCpl != 0)
4936 {
4937 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
4938 return iemRaiseGeneralProtectionFault0(pVCpu);
4939 }
4940 /* Nested-guest VMX intercept. */
4941 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
4942 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
4943 {
4944 Log(("lldt: Guest intercept -> VM-exit\n"));
4945 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
4946 }
4947 if (uNewLdt & X86_SEL_LDT)
4948 {
4949 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
4950 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
4951 }
4952
4953 /*
4954 * Now, loading a NULL selector is easy.
4955 */
4956 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4957 {
4958 /* Nested-guest SVM intercept. */
4959 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
4960 {
4961 Log(("lldt: Guest intercept -> #VMEXIT\n"));
4962 IEM_SVM_UPDATE_NRIP(pVCpu);
4963 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4964 }
4965
4966 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
4967 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
4968 CPUMSetGuestLDTR(pVCpu, uNewLdt);
4969 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4970 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4971 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4972 {
4973 /* AMD-V seems to leave the base and limit alone. */
4974 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4975 }
4976 else
4977 {
4978 /* VT-x (Intel 3960x) seems to be doing the following. */
4979 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
4980 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
4981 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
4982 }
4983
4984 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4985 return VINF_SUCCESS;
4986 }
4987
4988 /*
4989 * Read the descriptor.
4990 */
4991 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
4992 IEMSELDESC Desc;
4993 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
4994 if (rcStrict != VINF_SUCCESS)
4995 return rcStrict;
4996
4997 /* Check GPs first. */
4998 if (Desc.Legacy.Gen.u1DescType)
4999 {
5000 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5001 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5002 }
5003 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5004 {
5005 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5006 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5007 }
5008 uint64_t u64Base;
5009 if (!IEM_IS_LONG_MODE(pVCpu))
5010 u64Base = X86DESC_BASE(&Desc.Legacy);
5011 else
5012 {
5013 if (Desc.Long.Gen.u5Zeros)
5014 {
5015 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5016 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5017 }
5018
5019 u64Base = X86DESC64_BASE(&Desc.Long);
5020 if (!IEM_IS_CANONICAL(u64Base))
5021 {
5022 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5023 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5024 }
5025 }
5026
5027 /* NP */
5028 if (!Desc.Legacy.Gen.u1Present)
5029 {
5030 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5031 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5032 }
5033
5034 /* Nested-guest SVM intercept. */
5035 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5036 {
5037 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5038 IEM_SVM_UPDATE_NRIP(pVCpu);
5039 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5040 }
5041
5042 /*
5043 * It checks out alright, update the registers.
5044 */
5045/** @todo check if the actual value is loaded or if the RPL is dropped */
5046 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5047 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5048 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5049 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5050 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5051 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5052
5053 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5054 return VINF_SUCCESS;
5055}
5056
5057
5058/**
5059 * Implements sldt GReg
5060 *
5061 * @param iGReg The general register to store the LDTR selector in.
5062 * @param enmEffOpSize The operand size.
5063 */
5064IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5065{
5066 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5067 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5068 {
5069 Log(("sldt: Guest intercept -> VM-exit\n"));
5070 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5071 }
5072
5073 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5074
5075 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
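 /* Note: the 32-bit case deliberately does a 64-bit store; writing a 32-bit
    general register zero-extends into the full 64-bit register, which a plain
    32-bit store through the register reference would not emulate. */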
5076 switch (enmEffOpSize)
5077 {
5078 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5079 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5080 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5081 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5082 }
5083 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5084 return VINF_SUCCESS;
5085}
5086
5087
5088/**
5089 * Implements sldt mem.
5090 *
5091 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5092 * @param GCPtrEffDst Where to store the 16-bit LDTR selector.
5094 */
5095IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5096{
5097 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5098
5099 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5100 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5101 if (rcStrict == VINF_SUCCESS)
5102 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5103 return rcStrict;
5104}
5105
5106
5107/**
5108 * Implements ltr.
5109 *
5110 * @param uNewTr The new TSS selector value.
5111 */
5112IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5113{
5114 /*
5115 * Check preconditions.
5116 */
5117 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5118 {
5119 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5120 return iemRaiseUndefinedOpcode(pVCpu);
5121 }
5122 if (pVCpu->iem.s.uCpl != 0)
5123 {
5124 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
5125 return iemRaiseGeneralProtectionFault0(pVCpu);
5126 }
5127 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5128 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5129 {
5130 Log(("ltr: Guest intercept -> VM-exit\n"));
5131 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5132 }
5133 if (uNewTr & X86_SEL_LDT)
5134 {
5135 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5136 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5137 }
5138 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5139 {
5140 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5141 return iemRaiseGeneralProtectionFault0(pVCpu);
5142 }
5143 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5144 {
5145 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5146 IEM_SVM_UPDATE_NRIP(pVCpu);
5147 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5148 }
5149
5150 /*
5151 * Read the descriptor.
5152 */
5153 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5154 IEMSELDESC Desc;
5155 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5156 if (rcStrict != VINF_SUCCESS)
5157 return rcStrict;
5158
5159 /* Check GPs first. */
5160 if (Desc.Legacy.Gen.u1DescType)
5161 {
5162 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5163 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5164 }
5165 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5166 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5167 || IEM_IS_LONG_MODE(pVCpu)) )
5168 {
5169 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5170 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5171 }
5172 uint64_t u64Base;
5173 if (!IEM_IS_LONG_MODE(pVCpu))
5174 u64Base = X86DESC_BASE(&Desc.Legacy);
5175 else
5176 {
5177 if (Desc.Long.Gen.u5Zeros)
5178 {
5179 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5180 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5181 }
5182
5183 u64Base = X86DESC64_BASE(&Desc.Long);
5184 if (!IEM_IS_CANONICAL(u64Base))
5185 {
5186 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5187 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5188 }
5189 }
5190
5191 /* NP */
5192 if (!Desc.Legacy.Gen.u1Present)
5193 {
5194 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5195 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5196 }
5197
5198 /*
5199 * Set it busy.
5200 * Note! Intel says this should lock down the whole descriptor, but we'll
5201 * restrict ourselves to 32-bit for now due to lack of inline
5202 * assembly and such.
5203 */
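 /* The busy flag is bit 1 of the type field, i.e. bit 41 (40 + 1) of the 8-byte
    descriptor. The switch below merely picks a byte offset that keeps the atomic
    bit operation on an aligned address; all four cases set the very same
    descriptor bit. */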
5204 void *pvDesc;
5205 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
5206 if (rcStrict != VINF_SUCCESS)
5207 return rcStrict;
5208 switch ((uintptr_t)pvDesc & 3)
5209 {
5210 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5211 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5212 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5213 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5214 }
5215 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5216 if (rcStrict != VINF_SUCCESS)
5217 return rcStrict;
5218 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5219
5220 /*
5221 * It checks out alright, update the registers.
5222 */
5223/** @todo check if the actual value is loaded or if the RPL is dropped */
5224 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5225 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5226 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5227 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5228 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5229 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5230
5231 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5232 return VINF_SUCCESS;
5233}
5234
5235
5236/**
5237 * Implements str GReg
5238 *
5239 * @param iGReg The general register to store the TR selector in.
5240 * @param enmEffOpSize The operand size.
5241 */
5242IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5243{
5244 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5245 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5246 {
5247 Log(("str_reg: Guest intercept -> VM-exit\n"));
5248 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5249 }
5250
5251 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5252
5253 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5254 switch (enmEffOpSize)
5255 {
5256 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5257 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5258 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5259 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5260 }
5261 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5262 return VINF_SUCCESS;
5263}
5264
5265
5266/**
5267 * Implements str mem.
5268 *
5269 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5270 * @param GCPtrEffDst Where to store the 16-bit TR selector.
5272 */
5273IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5274{
5275 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5276 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5277 {
5278 Log(("str_mem: Guest intercept -> VM-exit\n"));
5279 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5280 }
5281
5282 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5283
5284 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5285 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5286 if (rcStrict == VINF_SUCCESS)
5287 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5288 return rcStrict;
5289}
5290
5291
5292/**
5293 * Implements mov GReg,CRx.
5294 *
5295 * @param iGReg The general register to store the CRx value in.
5296 * @param iCrReg The CRx register to read (valid).
5297 */
5298IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5299{
5300 if (pVCpu->iem.s.uCpl != 0)
5301 return iemRaiseGeneralProtectionFault0(pVCpu);
5302 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5303
5304 if (IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5305 {
5306 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5307 IEM_SVM_UPDATE_NRIP(pVCpu);
5308 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5309 }
5310
5311 /* Read it. */
5312 uint64_t crX;
5313 switch (iCrReg)
5314 {
5315 case 0:
5316 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5317 crX = pVCpu->cpum.GstCtx.cr0;
5318 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5319 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5320 break;
5321 case 2:
5322 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5323 crX = pVCpu->cpum.GstCtx.cr2;
5324 break;
5325 case 3:
5326 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5327 crX = pVCpu->cpum.GstCtx.cr3;
5328 break;
5329 case 4:
5330 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5331 crX = pVCpu->cpum.GstCtx.cr4;
5332 break;
5333 case 8:
5334 {
5335 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5336#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5337 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5338 {
5339 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5340 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5341 return rcStrict;
5342
5343 /*
5344 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR are copied
5345 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5346 * are cleared.
5347 *
5348 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5349 */
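 /* E.g. a VTPR of 0xa0 would be read back as CR8 = 0xa here. */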
5350 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5351 {
5352 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5353 crX = (uTpr >> 4) & 0xf;
5354 break;
5355 }
5356 }
5357#endif
5358#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5359 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5360 {
5361 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
5362 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5363 {
5364 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5365 break;
5366 }
5367 }
5368#endif
5369 uint8_t uTpr;
5370 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5371 if (RT_SUCCESS(rc))
5372 crX = uTpr >> 4;
5373 else
5374 crX = 0;
5375 break;
5376 }
5377 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5378 }
5379
5380#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5381 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5382 {
5383 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5384 Assert(pVmcs);
5385 switch (iCrReg)
5386 {
5387 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5388 case 0: crX = CPUMGetGuestVmxMaskedCr0(pVCpu, &pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u); break;
5389 case 4: crX = CPUMGetGuestVmxMaskedCr4(pVCpu, &pVCpu->cpum.GstCtx, pVmcs->u64Cr4Mask.u); break;
5390
5391 case 3:
5392 {
5393 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5394 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5395 return rcStrict;
5396 break;
5397 }
5398 }
5399 }
5400#endif
5401
5402 /* Store it. */
5403 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5404 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5405 else
5406 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5407
5408 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5409 return VINF_SUCCESS;
5410}
5411
5412
5413/**
5414 * Implements smsw GReg.
5415 *
5416 * @param iGReg The general register to store the CR0 (MSW) value in.
5417 * @param enmEffOpSize The operand size.
5418 */
5419IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5420{
5421 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5422
5423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5424 uint64_t u64MaskedCr0;
5425 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5426 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5427 else
5428 {
5429 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5430 Assert(pVmcs);
5431 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(pVCpu, &pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u);
5432 }
5433 uint64_t const u64GuestCr0 = u64MaskedCr0;
5434#else
5435 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5436#endif
5437
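 /* For the 16-bit operand size the high MSW bits are architecturally undefined;
    the target-CPU checks below fill them the way the respective CPUs are assumed
    to have done it: 0xffe0 on a 386, 0xfff0 on a 286, and the raw CR0 bits on
    later CPUs. */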
5438 switch (enmEffOpSize)
5439 {
5440 case IEMMODE_16BIT:
5441 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5442 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0;
5443 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5444 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xffe0;
5445 else
5446 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xfff0;
5447 break;
5448
5449 case IEMMODE_32BIT:
5450 *(uint32_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)u64GuestCr0;
5451 break;
5452
5453 case IEMMODE_64BIT:
5454 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = u64GuestCr0;
5455 break;
5456
5457 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5458 }
5459
5460 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5461 return VINF_SUCCESS;
5462}
5463
5464
5465/**
5466 * Implements smsw mem.
5467 *
5468 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5469 * @param GCPtrEffDst Where to store the 16-bit CR0 (MSW) value.
5471 */
5472IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5473{
5474 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5475
5476#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5477 uint64_t u64MaskedCr0;
5478 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5479 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5480 else
5481 {
5482 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5483 Assert(pVmcs);
5484 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(pVCpu, &pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u);
5485 }
5486 uint64_t const u64GuestCr0 = u64MaskedCr0;
5487#else
5488 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5489#endif
5490
5491 uint16_t u16Value;
5492 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5493 u16Value = (uint16_t)u64GuestCr0;
5494 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5495 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5496 else
5497 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5498
5499 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5500 if (rcStrict == VINF_SUCCESS)
5501 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5502 return rcStrict;
5503}
5504
5505
5506/**
5507 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5508 *
5509 * @param iCrReg The CRx register to write (valid).
5510 * @param uNewCrX The new value.
5511 * @param enmAccessCrX The instruction that caused the CrX load.
5512 * @param iGReg The general register in case of a 'mov CRx,GReg'
5513 * instruction.
5514 */
5515IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5516{
5517 VBOXSTRICTRC rcStrict;
5518 int rc;
5519#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5520 RT_NOREF2(iGReg, enmAccessCrX);
5521#endif
5522
5523 /*
5524 * Try to store it.
5525 * Unfortunately, CPUM only does a tiny bit of the work.
5526 */
5527 switch (iCrReg)
5528 {
5529 case 0:
5530 {
5531 /*
5532 * Perform checks.
5533 */
5534 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5535
5536 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5537 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5538
5539 /* ET is hardcoded on 486 and later. */
5540 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5541 uNewCrX |= X86_CR0_ET;
5542 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5543 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5544 {
5545 uNewCrX &= fValid;
5546 uNewCrX |= X86_CR0_ET;
5547 }
5548 else
5549 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5550
5551 /* Check for reserved bits. */
5552 if (uNewCrX & ~(uint64_t)fValid)
5553 {
5554 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5555 return iemRaiseGeneralProtectionFault0(pVCpu);
5556 }
5557
5558 /* Check for invalid combinations. */
5559 if ( (uNewCrX & X86_CR0_PG)
5560 && !(uNewCrX & X86_CR0_PE) )
5561 {
5562 Log(("Trying to set CR0.PG without CR0.PE\n"));
5563 return iemRaiseGeneralProtectionFault0(pVCpu);
5564 }
5565
5566 if ( !(uNewCrX & X86_CR0_CD)
5567 && (uNewCrX & X86_CR0_NW) )
5568 {
5569 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5570 return iemRaiseGeneralProtectionFault0(pVCpu);
5571 }
5572
5573 if ( !(uNewCrX & X86_CR0_PG)
5574 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
5575 {
5576 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5577 return iemRaiseGeneralProtectionFault0(pVCpu);
5578 }
5579
5580 /* Long mode consistency checks. */
5581 if ( (uNewCrX & X86_CR0_PG)
5582 && !(uOldCrX & X86_CR0_PG)
5583 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5584 {
5585 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
5586 {
5587 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
5588 return iemRaiseGeneralProtectionFault0(pVCpu);
5589 }
5590 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
5591 {
5592 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
5593 return iemRaiseGeneralProtectionFault0(pVCpu);
5594 }
5595 }
5596
5597 /* Check for bits that must remain set or cleared in VMX operation,
5598 see Intel spec. 23.8 "Restrictions on VMX operation". */
5599 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5600 {
5601 uint32_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
5602 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
5603 {
5604 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
5605 return iemRaiseGeneralProtectionFault0(pVCpu);
5606 }
5607
5608 uint32_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5609 if (uNewCrX & ~uCr0Fixed1)
5610 {
5611 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
5612 return iemRaiseGeneralProtectionFault0(pVCpu);
5613 }
5614 }
5615
5616 /** @todo check reserved PDPTR bits as AMD states. */
5617
5618 /*
5619 * SVM nested-guest CR0 write intercepts.
5620 */
5621 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5622 {
5623 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5624 IEM_SVM_UPDATE_NRIP(pVCpu);
5625 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5626 }
5627 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5628 {
5629 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5630 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5631 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5632 {
5633 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5634 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5635 IEM_SVM_UPDATE_NRIP(pVCpu);
5636 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5637 }
5638 }
5639
5640 /*
5641 * Change CR0.
5642 */
5643 CPUMSetGuestCR0(pVCpu, uNewCrX);
5644 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
5645
5646 /*
5647 * Change EFER.LMA if entering or leaving long mode.
5648 */
5649 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5650 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5651 {
5652 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
5653 if (uNewCrX & X86_CR0_PG)
5654 NewEFER |= MSR_K6_EFER_LMA;
5655 else
5656 NewEFER &= ~MSR_K6_EFER_LMA;
5657
5658 CPUMSetGuestEFER(pVCpu, NewEFER);
5659 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
5660 }
5661
5662 /*
5663 * Inform PGM.
5664 */
5665 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
5666 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
5667 {
5668 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
5669 AssertRCReturn(rc, rc);
5670 /* ignore informational status codes */
5671 }
5672 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
5673 break;
5674 }
5675
5676 /*
5677 * CR2 can be changed without any restrictions.
5678 */
5679 case 2:
5680 {
5681 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5682 {
5683 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5684 IEM_SVM_UPDATE_NRIP(pVCpu);
5685 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5686 }
5687 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
5688 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
5689 rcStrict = VINF_SUCCESS;
5690 break;
5691 }
5692
5693 /*
5694 * CR3 is relatively simple, although AMD and Intel have different
5695 * accounts of how setting reserved bits is handled. We take Intel's
5696 * word for the lower bits and AMD's for the high bits (63:52). The
5697 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5698 * on this.
5699 */
5700 /** @todo Testcase: Setting reserved bits in CR3, especially before
5701 * enabling paging. */
5702 case 3:
5703 {
5704 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5705
5706 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
5707 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
5708 && (uNewCrX & RT_BIT_64(63)))
5709 {
5710 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
5711 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
5712 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
5713 * Paging-Structure Caches". */
5714 uNewCrX &= ~RT_BIT_64(63);
5715 }
5716
5717 /* Check / mask the value. */
5718 if (uNewCrX & UINT64_C(0xfff0000000000000))
5719 {
5720 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
5721 return iemRaiseGeneralProtectionFault0(pVCpu);
5722 }
5723
5724 uint64_t fValid;
5725 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5726 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
5727 fValid = UINT64_C(0x000fffffffffffff);
5728 else
5729 fValid = UINT64_C(0xffffffff);
5730 if (uNewCrX & ~fValid)
5731 {
5732 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
5733 uNewCrX, uNewCrX & ~fValid));
5734 uNewCrX &= fValid;
5735 }
5736
5737 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
5738 {
5739 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5740 IEM_SVM_UPDATE_NRIP(pVCpu);
5741 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
5742 }
5743
5744 /** @todo If we're in PAE mode we should check the PDPTRs for
5745 * invalid bits. */
5746
5747 /* Make the change. */
5748 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
5749 AssertRCSuccessReturn(rc, rc);
5750
5751 /* Inform PGM. */
5752 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
5753 {
5754 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
5755 AssertRCReturn(rc, rc);
5756 /* ignore informational status codes */
5757 }
5758 rcStrict = VINF_SUCCESS;
5759 break;
5760 }
5761
5762 /*
5763 * CR4 is a bit more tedious as there are bits which cannot be cleared
5764 * under some circumstances and such.
5765 */
5766 case 4:
5767 {
5768 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5769 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
5770
5771 /* Reserved bits. */
5772 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
5773 if (uNewCrX & ~(uint64_t)fValid)
5774 {
5775 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5776 return iemRaiseGeneralProtectionFault0(pVCpu);
5777 }
5778
5779 bool const fPcide = ((uNewCrX ^ uOldCrX) & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
5780 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
5781
5782 /* PCIDE check. */
5783 if ( fPcide
5784 && ( !fLongMode
5785 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
5786 {
5787 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
5788 return iemRaiseGeneralProtectionFault0(pVCpu);
5789 }
5790
5791 /* PAE check. */
5792 if ( fLongMode
5793 && (uOldCrX & X86_CR4_PAE)
5794 && !(uNewCrX & X86_CR4_PAE))
5795 {
5796 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
5797 return iemRaiseGeneralProtectionFault0(pVCpu);
5798 }
5799
5800 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
5801 {
5802 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5803 IEM_SVM_UPDATE_NRIP(pVCpu);
5804 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
5805 }
5806
5807 /* Check for bits that must remain set or cleared in VMX operation,
5808 see Intel spec. 23.8 "Restrictions on VMX operation". */
5809 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5810 {
5811 uint32_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
5812 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
5813 {
5814 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
5815 return iemRaiseGeneralProtectionFault0(pVCpu);
5816 }
5817
5818 uint32_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
5819 if (uNewCrX & ~uCr4Fixed1)
5820 {
5821 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
5822 return iemRaiseGeneralProtectionFault0(pVCpu);
5823 }
5824 }
5825
5826 /*
5827 * Change it.
5828 */
5829 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
5830 AssertRCSuccessReturn(rc, rc);
5831 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
5832
5833 /*
5834 * Notify SELM and PGM.
5835 */
5836 /* SELM - VME may change things wrt to the TSS shadowing. */
5837 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
5838 Log(("iemCImpl_load_CrX: VME %d -> %d\n", RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
5839
5840 /* PGM - flushing and mode. */
5841 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
5842 {
5843 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
5844 AssertRCReturn(rc, rc);
5845 /* ignore informational status codes */
5846 }
5847 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
5848 break;
5849 }
5850
5851 /*
5852 * CR8 maps to the APIC TPR.
5853 */
5854 case 8:
5855 {
5856 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5857 if (uNewCrX & ~(uint64_t)0xf)
5858 {
5859 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
5860 return iemRaiseGeneralProtectionFault0(pVCpu);
5861 }
5862
5863#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5864 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5865 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5866 {
5867 /*
5868 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
5869 * are copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
5870 * cleared. Following this the processor performs TPR virtualization.
5871 *
5872 * However, we should not perform TPR virtualization immediately here but
5873 * after this instruction has completed.
5874 *
5875 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5876 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
5877 */
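 /* E.g. 'mov cr8, 9' would store 0x90 in the VTPR here. */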
5878 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
5879 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
5880 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
5881 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
5882 rcStrict = VINF_SUCCESS;
5883 break;
5884 }
5885#endif
5886
5887#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5888 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5889 {
5890 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
5891 {
5892 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5893 IEM_SVM_UPDATE_NRIP(pVCpu);
5894 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
5895 }
5896
5897 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
5898 pVmcbCtrl->IntCtrl.n.u8VTPR = uNewCrX;
5899 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5900 {
5901 rcStrict = VINF_SUCCESS;
5902 break;
5903 }
5904 }
5905#endif
5906 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
5907 APICSetTpr(pVCpu, u8Tpr);
5908 rcStrict = VINF_SUCCESS;
5909 break;
5910 }
5911
5912 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5913 }
5914
5915 /*
5916 * Advance the RIP on success.
5917 */
5918 if (RT_SUCCESS(rcStrict))
5919 {
5920 if (rcStrict != VINF_SUCCESS)
5921 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5922 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5923 }
5924
5925 return rcStrict;
5926}
5927
5928
5929/**
5930 * Implements mov CRx,GReg.
5931 *
5932 * @param iCrReg The CRx register to write (valid).
5933 * @param iGReg The general register to load the CRx value from.
5934 */
5935IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
5936{
5937 if (pVCpu->iem.s.uCpl != 0)
5938 return iemRaiseGeneralProtectionFault0(pVCpu);
5939 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5940
5941 /*
5942 * Read the new value from the source register and call common worker.
5943 */
5944 uint64_t uNewCrX;
5945 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5946 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
5947 else
5948 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
5949
5950#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5951 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5952 {
5953 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
5954 switch (iCrReg)
5955 {
5956 case 0:
5957 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
5958 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
5959 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
5960 }
5961 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5962 return rcStrict;
5963 }
5964#endif
5965
5966 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
5967}
5968
5969
5970/**
5971 * Implements 'LMSW r/m16'
5972 *
5973 * @param u16NewMsw The new value.
5974 * @param GCPtrEffDst The guest-linear address of the source operand in case
5975 * of a memory operand. For register operand, pass
5976 * NIL_RTGCPTR.
5977 */
5978IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
5979{
5980 if (pVCpu->iem.s.uCpl != 0)
5981 return iemRaiseGeneralProtectionFault0(pVCpu);
5982 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5983 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5984
5985#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5986 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
5987 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5988 {
5989 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
5990 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5991 return rcStrict;
5992 }
5993#else
5994 RT_NOREF_PV(GCPtrEffDst);
5995#endif
5996
5997 /*
5998 * Compose the new CR0 value and call common worker.
5999 */
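 /* Note that PE is not masked out of the old CR0 value below, so LMSW can set
    PE but never clear it, matching the architected behaviour of the instruction. */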
6000 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6001 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6002 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6003}
6004
6005
6006/**
6007 * Implements 'CLTS'.
6008 */
6009IEM_CIMPL_DEF_0(iemCImpl_clts)
6010{
6011 if (pVCpu->iem.s.uCpl != 0)
6012 return iemRaiseGeneralProtectionFault0(pVCpu);
6013
6014 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6015 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6016 uNewCr0 &= ~X86_CR0_TS;
6017
6018#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6019 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6020 {
6021 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6022 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6023 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6024 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6025 return rcStrict;
6026 }
6027#endif
6028
6029 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6030}
6031
6032
6033/**
6034 * Implements mov GReg,DRx.
6035 *
6036 * @param iGReg The general register to store the DRx value in.
6037 * @param iDrReg The DRx register to read (0-7).
6038 */
6039IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6040{
6041#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6042 /*
6043 * Check nested-guest VMX intercept.
6044 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6045 * over CPL and CR4.DE and even DR4/DR5 checks.
6046 *
6047 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6048 */
6049 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6050 {
6051 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6052 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6053 return rcStrict;
6054 }
6055#endif
6056
6057 /*
6058 * Check preconditions.
6059 */
6060 /* Raise GPs. */
6061 if (pVCpu->iem.s.uCpl != 0)
6062 return iemRaiseGeneralProtectionFault0(pVCpu);
6063 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6064 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR0);
6065
6066 if ( (iDrReg == 4 || iDrReg == 5)
6067 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE) )
6068 {
6069 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6070 return iemRaiseGeneralProtectionFault0(pVCpu);
6071 }
6072
6073 /* Raise #DB if general access detect is enabled. */
6074 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6075 {
6076 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6077 return iemRaiseDebugException(pVCpu);
6078 }
6079
6080 /*
6081 * Read the debug register and store it in the specified general register.
6082 */
6083 uint64_t drX;
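 /* DR4 and DR5 alias DR6 and DR7 when CR4.DE is clear (the CR4.DE=1 case was
    rejected with #GP(0) above), hence the shared case labels below. */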
6084 switch (iDrReg)
6085 {
6086 case 0:
6087 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6088 drX = pVCpu->cpum.GstCtx.dr[0];
6089 break;
6090 case 1:
6091 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6092 drX = pVCpu->cpum.GstCtx.dr[1];
6093 break;
6094 case 2:
6095 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6096 drX = pVCpu->cpum.GstCtx.dr[2];
6097 break;
6098 case 3:
6099 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6100 drX = pVCpu->cpum.GstCtx.dr[3];
6101 break;
6102 case 6:
6103 case 4:
6104 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6105 drX = pVCpu->cpum.GstCtx.dr[6];
6106 drX |= X86_DR6_RA1_MASK;
6107 drX &= ~X86_DR6_RAZ_MASK;
6108 break;
6109 case 7:
6110 case 5:
6111 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6112 drX = pVCpu->cpum.GstCtx.dr[7];
6113 drX |= X86_DR7_RA1_MASK;
6114 drX &= ~X86_DR7_RAZ_MASK;
6115 break;
6116 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6117 }
6118
6119 /** @todo SVM nested-guest intercept for DR8-DR15? */
6120 /*
6121 * Check for any SVM nested-guest intercepts for the DRx read.
6122 */
6123 if (IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6124 {
6125 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6126 IEM_SVM_UPDATE_NRIP(pVCpu);
6127 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6128 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6129 }
6130
6131 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6132 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
6133 else
6134 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
6135
6136 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6137 return VINF_SUCCESS;
6138}
6139
6140
6141/**
6142 * Implements mov DRx,GReg.
6143 *
6144 * @param iDrReg The DRx register to write (valid).
6145 * @param iGReg The general register to load the DRx value from.
6146 */
6147IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6148{
6149#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6150 /*
6151 * Check nested-guest VMX intercept.
6152 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6153 * over CPL and CR4.DE and even DR4/DR5 checks.
6154 *
6155 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6156 */
6157 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6158 {
6159 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6160 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6161 return rcStrict;
6162 }
6163#endif
6164
6165 /*
6166 * Check preconditions.
6167 */
6168 if (pVCpu->iem.s.uCpl != 0)
6169 return iemRaiseGeneralProtectionFault0(pVCpu);
6170 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6171 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR4);
6172
6173 if (iDrReg == 4 || iDrReg == 5)
6174 {
6175 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6176 {
6177 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6178 return iemRaiseGeneralProtectionFault0(pVCpu);
6179 }
6180 iDrReg += 2;
6181 }
6182
6183 /* Raise #DB if general access detect is enabled. */
6184 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
6185 * \#GP? */
6186 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6187 {
6188 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6189 return iemRaiseDebugException(pVCpu);
6190 }
6191
6192 /*
6193 * Read the new value from the source register.
6194 */
6195 uint64_t uNewDrX;
6196 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6197 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6198 else
6199 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6200
6201 /*
6202 * Adjust it.
6203 */
6204 switch (iDrReg)
6205 {
6206 case 0:
6207 case 1:
6208 case 2:
6209 case 3:
6210 /* nothing to adjust */
6211 break;
6212
6213 case 6:
6214 if (uNewDrX & X86_DR6_MBZ_MASK)
6215 {
6216 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6217 return iemRaiseGeneralProtectionFault0(pVCpu);
6218 }
6219 uNewDrX |= X86_DR6_RA1_MASK;
6220 uNewDrX &= ~X86_DR6_RAZ_MASK;
6221 break;
6222
6223 case 7:
6224 if (uNewDrX & X86_DR7_MBZ_MASK)
6225 {
6226 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6227 return iemRaiseGeneralProtectionFault0(pVCpu);
6228 }
6229 uNewDrX |= X86_DR7_RA1_MASK;
6230 uNewDrX &= ~X86_DR7_RAZ_MASK;
6231 break;
6232
6233 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6234 }
6235
6236 /** @todo SVM nested-guest intercept for DR8-DR15? */
6237 /*
6238 * Check for any SVM nested-guest intercepts for the DRx write.
6239 */
6240 if (IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6241 {
6242 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6243 IEM_SVM_UPDATE_NRIP(pVCpu);
6244 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6245 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6246 }
6247
6248 /*
6249 * Do the actual setting.
6250 */
6251 if (iDrReg < 4)
6252 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6253 else if (iDrReg == 6)
6254 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6255
6256 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6257 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6258
6259 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6260 return VINF_SUCCESS;
6261}
6262
6263
6264/**
6265 * Implements 'INVLPG m'.
6266 *
6267 * @param GCPtrPage The effective address of the page to invalidate.
6268 * @remarks Updates the RIP.
6269 */
6270IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6271{
6272 /* ring-0 only. */
6273 if (pVCpu->iem.s.uCpl != 0)
6274 return iemRaiseGeneralProtectionFault0(pVCpu);
6275 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6276 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6277
6278#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6279 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6280 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6281 {
6282 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6283 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6284 }
6285#endif
6286
6287 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6288 {
6289 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6290 IEM_SVM_UPDATE_NRIP(pVCpu);
6291 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6292 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6293 }
6294
6295 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6296 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6297
6298 if (rc == VINF_SUCCESS)
6299 return VINF_SUCCESS;
6300 if (rc == VINF_PGM_SYNC_CR3)
6301 return iemSetPassUpStatus(pVCpu, rc);
6302
6303 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6304 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6305 return rc;
6306}
6307
6308
6309/**
6310 * Implements INVPCID.
6311 *
6312 * @param iEffSeg The segment of the invpcid descriptor.
6313 * @param GCPtrInvpcidDesc The address of the invpcid descriptor.
6314 * @param uInvpcidType The invalidation type.
6315 * @remarks Updates the RIP.
6316 */
6317IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint8_t, uInvpcidType)
6318{
6319 /*
6320 * Check preconditions.
6321 */
6322 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6323 return iemRaiseUndefinedOpcode(pVCpu);
6324
6325 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6326 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6327 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID))
6328 {
6329 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6330 return iemRaiseUndefinedOpcode(pVCpu);
6331 }
6332
6333 if (pVCpu->iem.s.uCpl != 0)
6334 {
6335 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6336 return iemRaiseGeneralProtectionFault0(pVCpu);
6337 }
6338
6339 if (IEM_IS_V86_MODE(pVCpu))
6340 {
6341 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6342 return iemRaiseGeneralProtectionFault0(pVCpu);
6343 }
6344
6345 /*
6346 * Check nested-guest intercept.
6347 *
6348 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6349 * both set. We have already checked the former earlier in this function.
6350 *
6351 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6352 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6353 */
6354 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6355 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6356 {
6357 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6358 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6359 }
6360
6361 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6362 {
6363 Log(("invpcid: invalid/unrecognized invpcid type %#x -> #GP(0)\n", uInvpcidType));
6364 return iemRaiseGeneralProtectionFault0(pVCpu);
6365 }
6366 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6367
6368 /*
6369 * Fetch the invpcid descriptor from guest memory.
6370 */
6371 RTUINT128U uDesc;
6372 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6373 if (rcStrict == VINF_SUCCESS)
6374 {
6375 /*
6376 * Validate the descriptor.
6377 */
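 /* The 16-byte descriptor holds the PCID in bits 11:0 and the linear address
    (only used by the individual-address type) in bits 127:64; bits 63:12 are
    reserved and must be zero, which the check below enforces. */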
6378 if (uDesc.s.Lo > 0xfff)
6379 {
6380 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6381 return iemRaiseGeneralProtectionFault0(pVCpu);
6382 }
6383
6384 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6385 uint8_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff);
6386 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6387 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
6388 switch (uInvpcidType)
6389 {
6390 case X86_INVPCID_TYPE_INDV_ADDR:
6391 {
6392 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6393 {
6394 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
6395 return iemRaiseGeneralProtectionFault0(pVCpu);
6396 }
6397 if ( !(uCr4 & X86_CR4_PCIDE)
6398 && uPcid != 0)
6399 {
6400 Log(("invpcid: invalid pcid %#x\n", uPcid));
6401 return iemRaiseGeneralProtectionFault0(pVCpu);
6402 }
6403
6404 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
6405 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6406 break;
6407 }
6408
6409 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6410 {
6411 if ( !(uCr4 & X86_CR4_PCIDE)
6412 && uPcid != 0)
6413 {
6414 Log(("invpcid: invalid pcid %#x\n", uPcid));
6415 return iemRaiseGeneralProtectionFault0(pVCpu);
6416 }
6417 /* Invalidate all mappings associated with PCID except global translations. */
6418 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6419 break;
6420 }
6421
6422 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
6423 {
6424 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
6425 break;
6426 }
6427
6428 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
6429 {
6430 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6431 break;
6432 }
6433 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6434 }
6435 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6436 }
6437 return rcStrict;
6438}
6439
6440
6441/**
6442 * Implements INVD.
6443 */
6444IEM_CIMPL_DEF_0(iemCImpl_invd)
6445{
6446 if (pVCpu->iem.s.uCpl != 0)
6447 {
6448 Log(("invd: CPL != 0 -> #GP(0)\n"));
6449 return iemRaiseGeneralProtectionFault0(pVCpu);
6450 }
6451
6452 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6453 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
6454
6455 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
6456
6457 /* We currently take no action here. */
6458 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6459 return VINF_SUCCESS;
6460}
6461
6462
6463/**
6464 * Implements WBINVD.
6465 */
6466IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
6467{
6468 if (pVCpu->iem.s.uCpl != 0)
6469 {
6470 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
6471 return iemRaiseGeneralProtectionFault0(pVCpu);
6472 }
6473
6474 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6475 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
6476
6477 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
6478
6479 /* We currently take no action here. */
6480 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6481 return VINF_SUCCESS;
6482}
6483
6484
6485/** Implements 'RSM' (opcode 0x0f 0xaa). */
6486IEM_CIMPL_DEF_0(iemCImpl_rsm)
6487{
6488 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
6489 NOREF(cbInstr);
6490 return iemRaiseUndefinedOpcode(pVCpu);
6491}
6492
6493
6494/**
6495 * Implements RDTSC.
6496 */
6497IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
6498{
6499 /*
6500 * Check preconditions.
6501 */
6502 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
6503 return iemRaiseUndefinedOpcode(pVCpu);
6504
6505 if (pVCpu->iem.s.uCpl != 0)
6506 {
6507 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6508 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6509 {
6510 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6511 return iemRaiseGeneralProtectionFault0(pVCpu);
6512 }
6513 }
6514
6515 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6516 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6517 {
6518 Log(("rdtsc: Guest intercept -> VM-exit\n"));
6519 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
6520 }
6521
6522 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
6523 {
6524 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
6525 IEM_SVM_UPDATE_NRIP(pVCpu);
6526 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6527 }
6528
6529 /*
6530 * Do the job.
6531 */
6532 uint64_t uTicks = TMCpuTickGet(pVCpu);
6533#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
6534 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6535#endif
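    /* EDX:EAX = TSC; the 64-bit register stores below also clear the upper halves of RAX and RDX. */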
6536 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
6537 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
6538 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
6539 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6540 return VINF_SUCCESS;
6541}
6542
6543
6544/**
6545 * Implements RDTSCP.
6546 */
6547IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
6548{
6549 /*
6550 * Check preconditions.
6551 */
6552 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
6553 return iemRaiseUndefinedOpcode(pVCpu);
6554
6555 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6556 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP))
6557 {
6558 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
6559 return iemRaiseUndefinedOpcode(pVCpu);
6560 }
6561
6562 if (pVCpu->iem.s.uCpl != 0)
6563 {
6564 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6565 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6566 {
6567 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6568 return iemRaiseGeneralProtectionFault0(pVCpu);
6569 }
6570 }
6571
6572 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6573 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6574 {
6575 Log(("rdtscp: Guest intercept -> VM-exit\n"));
6576 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
6577 }
6578 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
6579 {
6580 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
6581 IEM_SVM_UPDATE_NRIP(pVCpu);
6582 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6583 }
6584
6585 /*
6586 * Do the job.
6587 * Query the MSR first in case of trips to ring-3.
6588 */
6589 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
6590 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
6591 if (rcStrict == VINF_SUCCESS)
6592 {
6593 /* Low dword of the TSC_AUX msr only. */
6594 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
6595
6596 uint64_t uTicks = TMCpuTickGet(pVCpu);
6597#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
6598 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6599#endif
6600 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
6601 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
6602 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
6603 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6604 }
6605 return rcStrict;
6606}
6607
6608
6609/**
6610 * Implements RDPMC.
6611 */
6612IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
6613{
6614 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6615
6616 if ( pVCpu->iem.s.uCpl != 0
6617 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
6618 return iemRaiseGeneralProtectionFault0(pVCpu);
6619
6620 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6621 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
6622 {
6623 Log(("rdpmc: Guest intercept -> VM-exit\n"));
6624 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
6625 }
6626
6627 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
6628 {
6629 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
6630 IEM_SVM_UPDATE_NRIP(pVCpu);
6631 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6632 }
6633
6634 /** @todo Emulate performance counters, for now just return 0. */
6635 pVCpu->cpum.GstCtx.rax = 0;
6636 pVCpu->cpum.GstCtx.rdx = 0;
6637 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
6638 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
6639 * ecx but see @bugref{3472}! */
6640
6641 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6642 return VINF_SUCCESS;
6643}
6644
6645
6646/**
6647 * Implements RDMSR.
6648 */
6649IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
6650{
6651 /*
6652 * Check preconditions.
6653 */
6654 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
6655 return iemRaiseUndefinedOpcode(pVCpu);
6656 if (pVCpu->iem.s.uCpl != 0)
6657 return iemRaiseGeneralProtectionFault0(pVCpu);
6658
6659 /*
6660 * Check nested-guest intercepts.
6661 */
6662#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6663 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6664 {
6665 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
6666 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
6667 }
6668#endif
6669
6670#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6671 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
6672 {
6673 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */);
6674 if (rcStrict == VINF_SVM_VMEXIT)
6675 return VINF_SUCCESS;
6676 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
6677 {
6678 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
6679 return rcStrict;
6680 }
6681 }
6682#endif
6683
6684 /*
6685 * Do the job.
6686 */
6687 RTUINT64U uValue;
6688 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
6689 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
6690
6691 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
6692 if (rcStrict == VINF_SUCCESS)
6693 {
6694 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
6695 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
6696 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
6697
6698 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6699 return VINF_SUCCESS;
6700 }
6701
6702#ifndef IN_RING3
6703 /* Deferred to ring-3. */
6704 if (rcStrict == VINF_CPUM_R3_MSR_READ)
6705 {
6706 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
6707 return rcStrict;
6708 }
6709#endif
6710
6711 /* Often an unimplemented MSR or MSR bit, so worth logging. */
6712 if (pVCpu->iem.s.cLogRelRdMsr < 32)
6713 {
6714 pVCpu->iem.s.cLogRelRdMsr++;
6715 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
6716 }
6717 else
6718 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
6719 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6720 return iemRaiseGeneralProtectionFault0(pVCpu);
6721}
6722
6723
6724/**
6725 * Implements WRMSR.
6726 */
6727IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
6728{
6729 /*
6730 * Check preconditions.
6731 */
6732 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
6733 return iemRaiseUndefinedOpcode(pVCpu);
6734 if (pVCpu->iem.s.uCpl != 0)
6735 return iemRaiseGeneralProtectionFault0(pVCpu);
6736
6737 RTUINT64U uValue;
6738 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
6739 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
6740
6741 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
6742
6743 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
6744 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
6745
6746 /*
6747 * Check nested-guest intercepts.
6748 */
6749#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6750 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6751 {
6752 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
6753 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
6754 }
6755#endif
6756
6757#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6758 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
6759 {
6760 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */);
6761 if (rcStrict == VINF_SVM_VMEXIT)
6762 return VINF_SUCCESS;
6763 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
6764 {
6765 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
6766 return rcStrict;
6767 }
6768 }
6769#endif
6770
6771 /*
6772 * Do the job.
6773 */
6774 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
6775 if (rcStrict == VINF_SUCCESS)
6776 {
6777 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6778 return VINF_SUCCESS;
6779 }
6780
6781#ifndef IN_RING3
6782 /* Deferred to ring-3. */
6783 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
6784 {
6785 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
6786 return rcStrict;
6787 }
6788#endif
6789
6790 /* Often an unimplemented MSR or MSR bit, so worth logging. */
6791 if (pVCpu->iem.s.cLogRelWrMsr < 32)
6792 {
6793 pVCpu->iem.s.cLogRelWrMsr++;
6794 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
6795 }
6796 else
6797 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
6798 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
6799 return iemRaiseGeneralProtectionFault0(pVCpu);
6800}
6801
6802
6803/**
6804 * Implements 'IN eAX, port'.
6805 *
6806 * @param u16Port The source port.
6807 * @param fImm Whether the port was specified through an immediate operand
6808 * or the implicit DX register.
6809 * @param cbReg The register size.
6810 */
6811IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
6812{
6813 /*
6814 * CPL check
6815 */
6816 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
6817 if (rcStrict != VINF_SUCCESS)
6818 return rcStrict;
6819
6820 /*
6821 * Check VMX nested-guest IO intercept.
6822 */
6823#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6824 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6825 {
6826 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, fImm, cbReg, cbInstr);
6827 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6828 return rcStrict;
6829 }
6830#else
6831 RT_NOREF(fImm);
6832#endif
6833
6834 /*
6835 * Check SVM nested-guest IO intercept.
6836 */
6837#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6838 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6839 {
6840 uint8_t cAddrSizeBits;
6841 switch (pVCpu->iem.s.enmEffAddrMode)
6842 {
6843 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
6844 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
6845 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
6846 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6847 }
6848 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
6849 false /* fRep */, false /* fStrIo */, cbInstr);
6850 if (rcStrict == VINF_SVM_VMEXIT)
6851 return VINF_SUCCESS;
6852 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
6853 {
6854 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6855 VBOXSTRICTRC_VAL(rcStrict)));
6856 return rcStrict;
6857 }
6858 }
6859#endif
6860
6861 /*
6862 * Perform the I/O.
6863 */
6864 uint32_t u32Value = 0;
6865 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg);
6866 if (IOM_SUCCESS(rcStrict))
6867 {
6868 switch (cbReg)
6869 {
6870 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
6871 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
6872 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
6873 default: AssertFailedReturn(VERR_IEM_IPE_3);
6874 }
6875 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6876 pVCpu->iem.s.cPotentialExits++;
6877 if (rcStrict != VINF_SUCCESS)
6878 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6879 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6880
6881 /*
6882 * Check for I/O breakpoints.
6883 */
6884 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7];
6885 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6886 && X86_DR7_ANY_RW_IO(uDr7)
6887 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
6888 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6889 {
6890 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
6891 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg);
6892 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6893 rcStrict = iemRaiseDebugException(pVCpu);
6894 }
6895 }
6896
6897 return rcStrict;
6898}
6899
6900
6901/**
6902 * Implements 'IN eAX, DX'.
6903 *
6904 * @param cbReg The register size.
6905 */
6906IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
6907{
6908 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
6909}
6910
6911
6912/**
6913 * Implements 'OUT port, eAX'.
6914 *
6915 * @param u16Port The destination port.
6916 * @param fImm Whether the port was specified through an immediate operand
6917 * or the implicit DX register.
6918 * @param cbReg The register size.
6919 */
6920IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
6921{
6922 /*
6923 * CPL check
6924 */
6925 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
6926 if (rcStrict != VINF_SUCCESS)
6927 return rcStrict;
6928
6929 /*
6930 * Check VMX nested-guest I/O intercept.
6931 */
6932#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6933 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6934 {
6935 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, fImm, cbReg, cbInstr);
6936 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6937 return rcStrict;
6938 }
6939#else
6940 RT_NOREF(fImm);
6941#endif
6942
6943 /*
6944 * Check SVM nested-guest I/O intercept.
6945 */
6946#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6947 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
6948 {
6949 uint8_t cAddrSizeBits;
6950 switch (pVCpu->iem.s.enmEffAddrMode)
6951 {
6952 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
6953 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
6954 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
6955 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6956 }
6957 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
6958 false /* fRep */, false /* fStrIo */, cbInstr);
6959 if (rcStrict == VINF_SVM_VMEXIT)
6960 return VINF_SUCCESS;
6961 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
6962 {
6963 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
6964 VBOXSTRICTRC_VAL(rcStrict)));
6965 return rcStrict;
6966 }
6967 }
6968#endif
6969
6970 /*
6971 * Perform the I/O.
6972 */
6973 uint32_t u32Value;
6974 switch (cbReg)
6975 {
6976 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
6977 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
6978 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
6979 default: AssertFailedReturn(VERR_IEM_IPE_4);
6980 }
6981 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg);
6982 if (IOM_SUCCESS(rcStrict))
6983 {
6984 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6985 pVCpu->iem.s.cPotentialExits++;
6986 if (rcStrict != VINF_SUCCESS)
6987 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6988 Assert(rcStrict == VINF_SUCCESS); /* assumed below */
6989
6990 /*
6991 * Check for I/O breakpoints.
6992 */
6993 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7];
6994 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6995 && X86_DR7_ANY_RW_IO(uDr7)
6996 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
6997 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
6998 {
6999 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7000 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg);
7001 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7002 rcStrict = iemRaiseDebugException(pVCpu);
7003 }
7004 }
7005 return rcStrict;
7006}
7007
7008
7009/**
7010 * Implements 'OUT DX, eAX'.
7011 *
7012 * @param cbReg The register size.
7013 */
7014IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
7015{
7016 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7017}
7018
7019
7020/**
7021 * Implements 'CLI'.
7022 */
7023IEM_CIMPL_DEF_0(iemCImpl_cli)
7024{
7025 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7026 uint32_t const fEflOld = fEfl;
7027
7028 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
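    /* Protected mode: CPL <= IOPL clears IF; CPL 3 with CR4.PVI set clears VIF instead;
       anything else raises #GP(0).  V8086 mode: IOPL 3 clears IF, IOPL < 3 with CR4.VME
       clears VIF, otherwise #GP(0).  Real mode: IF is simply cleared. */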
7029 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7030 {
7031 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7032 if (!(fEfl & X86_EFL_VM))
7033 {
7034 if (pVCpu->iem.s.uCpl <= uIopl)
7035 fEfl &= ~X86_EFL_IF;
7036 else if ( pVCpu->iem.s.uCpl == 3
7037 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7038 fEfl &= ~X86_EFL_VIF;
7039 else
7040 return iemRaiseGeneralProtectionFault0(pVCpu);
7041 }
7042 /* V8086 */
7043 else if (uIopl == 3)
7044 fEfl &= ~X86_EFL_IF;
7045 else if ( uIopl < 3
7046 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7047 fEfl &= ~X86_EFL_VIF;
7048 else
7049 return iemRaiseGeneralProtectionFault0(pVCpu);
7050 }
7051 /* real mode */
7052 else
7053 fEfl &= ~X86_EFL_IF;
7054
7055 /* Commit. */
7056 IEMMISC_SET_EFL(pVCpu, fEfl);
7057 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7058 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
7059 return VINF_SUCCESS;
7060}
7061
7062
7063/**
7064 * Implements 'STI'.
7065 */
7066IEM_CIMPL_DEF_0(iemCImpl_sti)
7067{
7068 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7069 uint32_t const fEflOld = fEfl;
7070
7071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7072 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7073 {
7074 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7075 if (!(fEfl & X86_EFL_VM))
7076 {
7077 if (pVCpu->iem.s.uCpl <= uIopl)
7078 fEfl |= X86_EFL_IF;
7079 else if ( pVCpu->iem.s.uCpl == 3
7080 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7081 && !(fEfl & X86_EFL_VIP) )
7082 fEfl |= X86_EFL_VIF;
7083 else
7084 return iemRaiseGeneralProtectionFault0(pVCpu);
7085 }
7086 /* V8086 */
7087 else if (uIopl == 3)
7088 fEfl |= X86_EFL_IF;
7089 else if ( uIopl < 3
7090 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7091 && !(fEfl & X86_EFL_VIP) )
7092 fEfl |= X86_EFL_VIF;
7093 else
7094 return iemRaiseGeneralProtectionFault0(pVCpu);
7095 }
7096 /* real mode */
7097 else
7098 fEfl |= X86_EFL_IF;
7099
7100 /* Commit. */
7101 IEMMISC_SET_EFL(pVCpu, fEfl);
7102 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7103 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7104 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
7105 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7106 return VINF_SUCCESS;
7107}
7108
7109
7110/**
7111 * Implements 'HLT'.
7112 */
7113IEM_CIMPL_DEF_0(iemCImpl_hlt)
7114{
7115 if (pVCpu->iem.s.uCpl != 0)
7116 return iemRaiseGeneralProtectionFault0(pVCpu);
7117
7118 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7119 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7120 {
7121 Log2(("hlt: Guest intercept -> VM-exit\n"));
7122 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7123 }
7124
7125 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7126 {
7127 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7128 IEM_SVM_UPDATE_NRIP(pVCpu);
7129 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7130 }
7131
7132 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7133 return VINF_EM_HALT;
7134}
7135
7136
7137/**
7138 * Implements 'MONITOR'.
7139 */
7140IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7141{
7142 /*
7143 * Permission checks.
7144 */
7145 if (pVCpu->iem.s.uCpl != 0)
7146 {
7147 Log2(("monitor: CPL != 0\n"));
7148 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7149 }
7150 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7151 {
7152 Log2(("monitor: Not in CPUID\n"));
7153 return iemRaiseUndefinedOpcode(pVCpu);
7154 }
7155
7156 /*
7157 * Check VMX guest-intercept.
7158 * This should be considered a fault-like VM-exit.
7159 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7160 */
7161 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7162 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7163 {
7164 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7165 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7166 }
7167
7168 /*
7169 * Gather the operands and validate them.
7170 */
7171 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7172 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7173 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7174/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7175 * \#GP first. */
7176 if (uEcx != 0)
7177 {
7178 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7179 return iemRaiseGeneralProtectionFault0(pVCpu);
7180 }
7181
7182 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7183 if (rcStrict != VINF_SUCCESS)
7184 return rcStrict;
7185
7186 RTGCPHYS GCPhysMem;
7187 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7188 if (rcStrict != VINF_SUCCESS)
7189 return rcStrict;
7190
7191#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7192 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7193 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7194 {
7195 /*
7196 * MONITOR does not access the memory, just monitors the address. However,
7197 * if the address falls in the APIC-access page, the address monitored must
7198 * instead be the corresponding address in the virtual-APIC page.
7199 *
7200 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7201 */
7202 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem);
7203 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7204 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7205 return rcStrict;
7206 }
7207#endif
7208
7209 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7210 {
7211 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7212 IEM_SVM_UPDATE_NRIP(pVCpu);
7213 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7214 }
7215
7216 /*
7217 * Call EM to prepare the monitor/wait.
7218 */
7219 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7220 Assert(rcStrict == VINF_SUCCESS);
7221
7222 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7223 return rcStrict;
7224}
7225
7226
7227/**
7228 * Implements 'MWAIT'.
7229 */
7230IEM_CIMPL_DEF_0(iemCImpl_mwait)
7231{
7232 /*
7233 * Permission checks.
7234 */
7235 if (pVCpu->iem.s.uCpl != 0)
7236 {
7237 Log2(("mwait: CPL != 0\n"));
7238 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7239 * EFLAGS.VM then.) */
7240 return iemRaiseUndefinedOpcode(pVCpu);
7241 }
7242 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7243 {
7244 Log2(("mwait: Not in CPUID\n"));
7245 return iemRaiseUndefinedOpcode(pVCpu);
7246 }
7247
7248 /* Check VMX nested-guest intercept. */
7249 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7250 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7251 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7252
7253 /*
7254 * Gather the operands and validate them.
7255 */
7256 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7257 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7258 if (uEcx != 0)
7259 {
7260 /* Only supported extension is break on IRQ when IF=0. */
7261 if (uEcx > 1)
7262 {
7263 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7264 return iemRaiseGeneralProtectionFault0(pVCpu);
7265 }
7266 uint32_t fMWaitFeatures = 0;
7267 uint32_t uIgnore = 0;
7268 CPUMGetGuestCpuId(pVCpu, 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7269 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7270 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7271 {
7272 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7273 return iemRaiseGeneralProtectionFault0(pVCpu);
7274 }
7275
7276#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7277 /*
7278 * If the interrupt-window exiting control is set, or a virtual interrupt is pending
7279 * for delivery, and interrupts are disabled, the processor does not enter its
7280 * mwait state but rather passes control to the next instruction.
7281 *
7282 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7283 */
7284 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7285 && !pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7286 {
7287 if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7288 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7289 {
7290 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7291 return VINF_SUCCESS;
7292 }
7293 }
7294#endif
7295 }
7296
7297 /*
7298 * Check SVM nested-guest mwait intercepts.
7299 */
7300 if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7301 && EMMonitorIsArmed(pVCpu))
7302 {
7303 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7304 IEM_SVM_UPDATE_NRIP(pVCpu);
7305 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7306 }
7307 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7308 {
7309 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7310 IEM_SVM_UPDATE_NRIP(pVCpu);
7311 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7312 }
7313
7314 /*
7315 * Call EM to prepare the monitor/wait.
7316 */
7317 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7318
7319 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7320 return rcStrict;
7321}
7322
7323
7324/**
7325 * Implements 'SWAPGS'.
7326 */
7327IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7328{
7329 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
7330
7331 /*
7332 * Permission checks.
7333 */
7334 if (pVCpu->iem.s.uCpl != 0)
7335 {
7336 Log2(("swapgs: CPL != 0\n"));
7337 return iemRaiseUndefinedOpcode(pVCpu);
7338 }
7339
7340 /*
7341 * Do the job.
7342 */
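    /* SWAPGS exchanges the current GS base with the IA32_KERNEL_GS_BASE MSR, letting
       64-bit kernels switch between the user and kernel GS bases on entry/exit. */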
7343 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7344 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7345 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7346 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7347
7348 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7349 return VINF_SUCCESS;
7350}
7351
7352
7353/**
7354 * Implements 'CPUID'.
7355 */
7356IEM_CIMPL_DEF_0(iemCImpl_cpuid)
7357{
7358 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7359 {
7360 Log2(("cpuid: Guest intercept -> VM-exit\n"));
7361 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
7362 }
7363
7364 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
7365 {
7366 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
7367 IEM_SVM_UPDATE_NRIP(pVCpu);
7368 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7369 }
7370
7371 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
7372 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
7373 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
7374 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
7375 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7376 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
7377 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
7378
7379 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7380 pVCpu->iem.s.cPotentialExits++;
7381 return VINF_SUCCESS;
7382}
7383
7384
7385/**
7386 * Implements 'AAD'.
7387 *
7388 * @param bImm The immediate operand.
7389 */
7390IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
7391{
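    /* AAD: AL = (AL + AH * imm) & 0xff and AH = 0 (imm is 10 for the plain opcode);
       the 16-bit assignment of the zero-extended byte below is what clears AH. */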
7392 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
7393 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
7394 pVCpu->cpum.GstCtx.ax = al;
7395 iemHlpUpdateArithEFlagsU8(pVCpu, al,
7396 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
7397 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
7398
7399 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7400 return VINF_SUCCESS;
7401}
7402
7403
7404/**
7405 * Implements 'AAM'.
7406 *
7407 * @param bImm The immediate operand. Cannot be 0.
7408 */
7409IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
7410{
7411 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
7412
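    /* AAM: AH = AL / imm, AL = AL % imm (imm is 10 for the plain opcode). */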
7413 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
7414 uint8_t const al = (uint8_t)ax % bImm;
7415 uint8_t const ah = (uint8_t)ax / bImm;
7416 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
7417 iemHlpUpdateArithEFlagsU8(pVCpu, al,
7418 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
7419 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
7420
7421 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7422 return VINF_SUCCESS;
7423}
7424
7425
7426/**
7427 * Implements 'DAA'.
7428 */
7429IEM_CIMPL_DEF_0(iemCImpl_daa)
7430{
7431 uint8_t const al = pVCpu->cpum.GstCtx.al;
7432 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
7433
7434 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7435 || (al & 0xf) >= 10)
7436 {
7437 pVCpu->cpum.GstCtx.al = al + 6;
7438 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7439 }
7440 else
7441 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7442
7443 if (al >= 0x9a || fCarry)
7444 {
7445 pVCpu->cpum.GstCtx.al += 0x60;
7446 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7447 }
7448 else
7449 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7450
7451 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7452 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7453 return VINF_SUCCESS;
7454}
7455
7456
7457/**
7458 * Implements 'DAS'.
7459 */
7460IEM_CIMPL_DEF_0(iemCImpl_das)
7461{
7462 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
7463 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
7464
7465 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7466 || (uInputAL & 0xf) >= 10)
7467 {
7468 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7469 if (uInputAL < 6)
7470 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7471 pVCpu->cpum.GstCtx.al = uInputAL - 6;
7472 }
7473 else
7474 {
7475 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7476 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7477 }
7478
7479 if (uInputAL >= 0x9a || fCarry)
7480 {
7481 pVCpu->cpum.GstCtx.al -= 0x60;
7482 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7483 }
7484
7485 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7486 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7487 return VINF_SUCCESS;
7488}
7489
7490
7491/**
7492 * Implements 'AAA'.
7493 */
7494IEM_CIMPL_DEF_0(iemCImpl_aaa)
7495{
7496 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
7497 {
7498 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7499 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7500 {
7501 iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.u32);
7502 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7503 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7504 }
7505 else
7506 {
7507 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7508 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7509 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7510 }
7511 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7512 }
7513 else
7514 {
7515 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7516 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7517 {
7518 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
7519 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7520 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7521 }
7522 else
7523 {
7524 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7525 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7526 }
7527 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7528 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7529 }
7530
7531 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7532 return VINF_SUCCESS;
7533}
7534
7535
7536/**
7537 * Implements 'AAS'.
7538 */
7539IEM_CIMPL_DEF_0(iemCImpl_aas)
7540{
7541 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
7542 {
7543 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7544 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7545 {
7546 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.u32);
7547 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7548 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7549 }
7550 else
7551 {
7552 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7553 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7554 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7555 }
7556 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7557 }
7558 else
7559 {
7560 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7561 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
7562 {
7563 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
7564 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7565 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7566 }
7567 else
7568 {
7569 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7570 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7571 }
7572 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
7573 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7574 }
7575
7576 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7577 return VINF_SUCCESS;
7578}
7579
7580
7581/**
7582 * Implements the 16-bit version of 'BOUND'.
7583 *
7584 * @note We have separate 16-bit and 32-bit variants of this function due to
7585 * the decoder using unsigned parameters, whereas we want signed ones to
7586 * do the job. This is significant for a recompiler.
7587 */
7588IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
7589{
7590 /*
7591 * Check if the index is inside the bounds, otherwise raise #BR.
7592 */
7593 if ( idxArray >= idxLowerBound
7594 && idxArray <= idxUpperBound)
7595 {
7596 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7597 return VINF_SUCCESS;
7598 }
7599
7600 return iemRaiseBoundRangeExceeded(pVCpu);
7601}
7602
7603
7604/**
7605 * Implements the 32-bit version of 'BOUND'.
7606 */
7607IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
7608{
7609 /*
7610 * Check if the index is inside the bounds, otherwise raise #BR.
7611 */
7612 if ( idxArray >= idxLowerBound
7613 && idxArray <= idxUpperBound)
7614 {
7615 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7616 return VINF_SUCCESS;
7617 }
7618
7619 return iemRaiseBoundRangeExceeded(pVCpu);
7620}
7621
7622
7623
7624/*
7625 * Instantiate the various string operation combinations.
7626 */
7627#define OP_SIZE 8
7628#define ADDR_SIZE 16
7629#include "IEMAllCImplStrInstr.cpp.h"
7630#define OP_SIZE 8
7631#define ADDR_SIZE 32
7632#include "IEMAllCImplStrInstr.cpp.h"
7633#define OP_SIZE 8
7634#define ADDR_SIZE 64
7635#include "IEMAllCImplStrInstr.cpp.h"
7636
7637#define OP_SIZE 16
7638#define ADDR_SIZE 16
7639#include "IEMAllCImplStrInstr.cpp.h"
7640#define OP_SIZE 16
7641#define ADDR_SIZE 32
7642#include "IEMAllCImplStrInstr.cpp.h"
7643#define OP_SIZE 16
7644#define ADDR_SIZE 64
7645#include "IEMAllCImplStrInstr.cpp.h"
7646
7647#define OP_SIZE 32
7648#define ADDR_SIZE 16
7649#include "IEMAllCImplStrInstr.cpp.h"
7650#define OP_SIZE 32
7651#define ADDR_SIZE 32
7652#include "IEMAllCImplStrInstr.cpp.h"
7653#define OP_SIZE 32
7654#define ADDR_SIZE 64
7655#include "IEMAllCImplStrInstr.cpp.h"
7656
7657#define OP_SIZE 64
7658#define ADDR_SIZE 32
7659#include "IEMAllCImplStrInstr.cpp.h"
7660#define OP_SIZE 64
7661#define ADDR_SIZE 64
7662#include "IEMAllCImplStrInstr.cpp.h"
7663
7664
7665/**
7666 * Implements 'XGETBV'.
7667 */
7668IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
7669{
7670 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7671 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
7672 {
7673 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7674 switch (uEcx)
7675 {
7676 case 0:
7677 break;
7678
7679 case 1: /** @todo Implement XCR1 support. */
7680 default:
7681 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
7682 return iemRaiseGeneralProtectionFault0(pVCpu);
7683
7684 }
7685 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
7686 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
7687 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
7688
7689 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7690 return VINF_SUCCESS;
7691 }
7692 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
7693 return iemRaiseUndefinedOpcode(pVCpu);
7694}
7695
7696
7697/**
7698 * Implements 'XSETBV'.
7699 */
7700IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
7701{
7702 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
7703 {
7704 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
7705 {
7706 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
7707 IEM_SVM_UPDATE_NRIP(pVCpu);
7708 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7709 }
7710
7711 if (pVCpu->iem.s.uCpl == 0)
7712 {
7713 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
7714
7715 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7716 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
7717
7718 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7719 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
7720 switch (uEcx)
7721 {
7722 case 0:
7723 {
7724 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
7725 if (rc == VINF_SUCCESS)
7726 break;
7727 Assert(rc == VERR_CPUM_RAISE_GP_0);
7728 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7729 return iemRaiseGeneralProtectionFault0(pVCpu);
7730 }
7731
7732 case 1: /** @todo Implement XCR1 support. */
7733 default:
7734 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
7735 return iemRaiseGeneralProtectionFault0(pVCpu);
7736
7737 }
7738
7739 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7740 return VINF_SUCCESS;
7741 }
7742
7743 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
7744 return iemRaiseGeneralProtectionFault0(pVCpu);
7745 }
7746 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
7747 return iemRaiseUndefinedOpcode(pVCpu);
7748}
7749
7750#ifdef IN_RING3
7751
7752/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
7753struct IEMCIMPLCX16ARGS
7754{
7755 PRTUINT128U pu128Dst;
7756 PRTUINT128U pu128RaxRdx;
7757 PRTUINT128U pu128RbxRcx;
7758 uint32_t *pEFlags;
7759# ifdef VBOX_STRICT
7760 uint32_t cCalls;
7761# endif
7762};
7763
7764/**
7765 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
7766 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
7767 */
7768static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPU pVCpu, void *pvUser)
7769{
7770 RT_NOREF(pVM, pVCpu);
7771 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
7772# ifdef VBOX_STRICT
7773 Assert(pArgs->cCalls == 0);
7774 pArgs->cCalls++;
7775# endif
7776
7777 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
7778 return VINF_SUCCESS;
7779}
7780
7781#endif /* IN_RING3 */
7782
7783/**
7784 * Implements 'CMPXCHG16B' fallback using rendezvous.
7785 */
7786IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
7787 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
7788{
7789#ifdef IN_RING3
7790 struct IEMCIMPLCX16ARGS Args;
7791 Args.pu128Dst = pu128Dst;
7792 Args.pu128RaxRdx = pu128RaxRdx;
7793 Args.pu128RbxRcx = pu128RbxRcx;
7794 Args.pEFlags = pEFlags;
7795# ifdef VBOX_STRICT
7796 Args.cCalls = 0;
7797# endif
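    /* VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE should run the callback exactly once while all
       EMTs are gathered, so the non-atomic fallback cannot race the other virtual CPUs. */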
7798 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
7799 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
7800 Assert(Args.cCalls == 1);
7801 if (rcStrict == VINF_SUCCESS)
7802 {
7803 /* Duplicated tail code. */
7804 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
7805 if (rcStrict == VINF_SUCCESS)
7806 {
7807 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
7808 if (!(*pEFlags & X86_EFL_ZF))
7809 {
7810 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
7811 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
7812 }
7813 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7814 }
7815 }
7816 return rcStrict;
7817#else
7818 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
7819 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
7820#endif
7821}
7822
7823
7824/**
7825 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
7826 *
7827 * This is implemented in C because it triggers a load-like behaviour without
7828 * actually reading anything. Since that's not so common, it's implemented
7829 * here.
7830 *
7831 * @param iEffSeg The effective segment.
7832 * @param GCPtrEff The address of the image.
7833 */
7834IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
7835{
7836 /*
7837 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
7838 */
7839 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
7840 if (rcStrict == VINF_SUCCESS)
7841 {
7842 RTGCPHYS GCPhysMem;
7843 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7844 if (rcStrict == VINF_SUCCESS)
7845 {
7846#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7847 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7848 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7849 {
7850 /*
7851 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
7852 * that contains the address. However, if the address falls in the APIC-access
7853 * page, the address flushed must instead be the corresponding address in the
7854 * virtual-APIC page.
7855 *
7856 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7857 */
7858 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem);
7859 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7860 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7861 return rcStrict;
7862 }
7863#endif
7864 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7865 return VINF_SUCCESS;
7866 }
7867 }
7868
7869 return rcStrict;
7870}
7871
7872
7873/**
7874 * Implements 'FINIT' and 'FNINIT'.
7875 *
7876 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
7877 * not.
7878 */
7879IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
7880{
7881 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
7882 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
7883 return iemRaiseDeviceNotAvailable(pVCpu);
7884
7885 iemFpuActualizeStateForChange(pVCpu);
7886 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
7887
7888 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
7889 if (fCheckXcpts && TODO )
7890 return iemRaiseMathFault(pVCpu);
7891 */
7892
7893 PX86XSAVEAREA pXState = pVCpu->cpum.GstCtx.CTX_SUFF(pXState);
7894 pXState->x87.FCW = 0x37f;
7895 pXState->x87.FSW = 0;
7896 pXState->x87.FTW = 0x00; /* 0 - empty. */
7897 pXState->x87.FPUDP = 0;
7898 pXState->x87.DS = 0; //??
7899 pXState->x87.Rsrvd2 = 0;
7900 pXState->x87.FPUIP = 0;
7901 pXState->x87.CS = 0; //??
7902 pXState->x87.Rsrvd1 = 0;
7903 pXState->x87.FOP = 0;
7904
7905 iemHlpUsedFpu(pVCpu);
7906 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7907 return VINF_SUCCESS;
7908}
7909
7910
7911/**
7912 * Implements 'FXSAVE'.
7913 *
7914 * @param iEffSeg The effective segment.
7915 * @param GCPtrEff The address of the image.
7916 * @param enmEffOpSize The operand size (only REX.W really matters).
7917 */
7918IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
7919{
7920 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
7921
7922 /*
7923 * Raise exceptions.
7924 */
7925 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
7926 return iemRaiseUndefinedOpcode(pVCpu);
7927 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
7928 return iemRaiseDeviceNotAvailable(pVCpu);
7929 if (GCPtrEff & 15)
7930 {
7931 /** @todo CPU/VM detection possible! \#AC might not be signalled for
7932 * all/any misalignment sizes; Intel says it's an implementation detail. */
7933 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7934 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7935 && pVCpu->iem.s.uCpl == 3)
7936 return iemRaiseAlignmentCheckException(pVCpu);
7937 return iemRaiseGeneralProtectionFault0(pVCpu);
7938 }
7939
7940 /*
7941 * Access the memory.
7942 */
7943 void *pvMem512;
7944 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
7945 if (rcStrict != VINF_SUCCESS)
7946 return rcStrict;
7947 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
7948 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7949
7950 /*
7951 * Store the registers.
7952 */
7953 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
7954 * implementation specific whether MXCSR and XMM0-XMM7 are saved. */
7955
7956 /* common for all formats */
7957 pDst->FCW = pSrc->FCW;
7958 pDst->FSW = pSrc->FSW;
7959 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
7960 pDst->FOP = pSrc->FOP;
7961 pDst->MXCSR = pSrc->MXCSR;
7962 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
7963 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
7964 {
7965 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
7966 * them for now... */
7967 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
7968 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
7969 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
7970 pDst->aRegs[i].au32[3] = 0;
7971 }
7972
7973 /* FPU IP, CS, DP and DS. */
7974 pDst->FPUIP = pSrc->FPUIP;
7975 pDst->CS = pSrc->CS;
7976 pDst->FPUDP = pSrc->FPUDP;
7977 pDst->DS = pSrc->DS;
7978 if (enmEffOpSize == IEMMODE_64BIT)
7979 {
7980 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
7981 pDst->Rsrvd1 = pSrc->Rsrvd1;
7982 pDst->Rsrvd2 = pSrc->Rsrvd2;
7983 pDst->au32RsrvdForSoftware[0] = 0;
7984 }
7985 else
7986 {
7987 pDst->Rsrvd1 = 0;
7988 pDst->Rsrvd2 = 0;
7989 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
7990 }
7991
7992 /* XMM registers. */
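    /* The XMM registers are skipped when EFER.FFXSR is set and we're in 64-bit ring 0
       (AMD's fast FXSAVE/FXRSTOR optimization). */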
7993 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
7994 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
7995 || pVCpu->iem.s.uCpl != 0)
7996 {
7997 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
7998 for (uint32_t i = 0; i < cXmmRegs; i++)
7999 pDst->aXMM[i] = pSrc->aXMM[i];
8000 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8001 * right? */
8002 }
8003
8004 /*
8005 * Commit the memory.
8006 */
8007 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8008 if (rcStrict != VINF_SUCCESS)
8009 return rcStrict;
8010
8011 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8012 return VINF_SUCCESS;
8013}
8014
8015
8016/**
8017 * Implements 'FXRSTOR'.
8018 *
 * @param iEffSeg The effective segment.
8019 * @param GCPtrEff The address of the image.
8020 * @param enmEffOpSize The operand size (only REX.W really matters).
8021 */
8022IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8023{
8024 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8025
8026 /*
8027 * Raise exceptions.
8028 */
8029 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8030 return iemRaiseUndefinedOpcode(pVCpu);
8031 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8032 return iemRaiseDeviceNotAvailable(pVCpu);
8033 if (GCPtrEff & 15)
8034 {
8035 /** @todo CPU/VM detection possible! \#AC might not be signalled for
8036 * all/any misalignment sizes; Intel says it's an implementation detail. */
8037 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8038 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8039 && pVCpu->iem.s.uCpl == 3)
8040 return iemRaiseAlignmentCheckException(pVCpu);
8041 return iemRaiseGeneralProtectionFault0(pVCpu);
8042 }
8043
8044 /*
8045 * Access the memory.
8046 */
8047 void *pvMem512;
8048 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
8049 if (rcStrict != VINF_SUCCESS)
8050 return rcStrict;
8051 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8052 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8053
8054 /*
8055 * Check the state for stuff which will #GP(0).
8056 */
8057 uint32_t const fMXCSR = pSrc->MXCSR;
8058 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8059 if (fMXCSR & ~fMXCSR_MASK)
8060 {
8061 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8062 return iemRaiseGeneralProtectionFault0(pVCpu);
8063 }
8064
8065 /*
8066 * Load the registers.
8067 */
8068 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's
8069 * implementation specific whether MXCSR and XMM0-XMM7 are restored. */
8070
8071 /* common for all formats */
8072 pDst->FCW = pSrc->FCW;
8073 pDst->FSW = pSrc->FSW;
8074 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8075 pDst->FOP = pSrc->FOP;
8076 pDst->MXCSR = fMXCSR;
8077 /* (MXCSR_MASK is read-only) */
8078 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8079 {
8080 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8081 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8082 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8083 pDst->aRegs[i].au32[3] = 0;
8084 }
8085
8086 /* FPU IP, CS, DP and DS. */
8087 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8088 {
8089 pDst->FPUIP = pSrc->FPUIP;
8090 pDst->CS = pSrc->CS;
8091 pDst->Rsrvd1 = pSrc->Rsrvd1;
8092 pDst->FPUDP = pSrc->FPUDP;
8093 pDst->DS = pSrc->DS;
8094 pDst->Rsrvd2 = pSrc->Rsrvd2;
8095 }
8096 else
8097 {
8098 pDst->FPUIP = pSrc->FPUIP;
8099 pDst->CS = pSrc->CS;
8100 pDst->Rsrvd1 = 0;
8101 pDst->FPUDP = pSrc->FPUDP;
8102 pDst->DS = pSrc->DS;
8103 pDst->Rsrvd2 = 0;
8104 }
8105
8106 /* XMM registers. */
8107 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8108 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8109 || pVCpu->iem.s.uCpl != 0)
8110 {
8111 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8112 for (uint32_t i = 0; i < cXmmRegs; i++)
8113 pDst->aXMM[i] = pSrc->aXMM[i];
8114 }
8115
8116 /*
8117 * Commit the memory.
8118 */
8119 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8120 if (rcStrict != VINF_SUCCESS)
8121 return rcStrict;
8122
8123 iemHlpUsedFpu(pVCpu);
8124 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8125 return VINF_SUCCESS;
8126}
8127
8128
8129/**
8130 * Implements 'XSAVE'.
8131 *
8132 * @param iEffSeg The effective segment.
8133 * @param GCPtrEff The address of the image.
8134 * @param enmEffOpSize The operand size (only REX.W really matters).
8135 */
8136IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8137{
8138 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8139
8140 /*
8141 * Raise exceptions.
8142 */
8143 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8144 return iemRaiseUndefinedOpcode(pVCpu);
8145 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8146 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8147 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8148 {
8149 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
8150 return iemRaiseUndefinedOpcode(pVCpu);
8151 }
8152 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8153 return iemRaiseDeviceNotAvailable(pVCpu);
8154 if (GCPtrEff & 63)
8155 {
8156 /** @todo CPU/VM detection possible! \#AC might not be signalled for
8157 * all/any misalignment sizes; Intel says it's an implementation detail. */
8158 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8159 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8160 && pVCpu->iem.s.uCpl == 3)
8161 return iemRaiseAlignmentCheckException(pVCpu);
8162 return iemRaiseGeneralProtectionFault0(pVCpu);
8163 }
8164
8165 /*
8166 * Calc the requested mask.
8167 */
8168 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
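    /* fReqComponents is the requested-feature bitmap (RFBM): EDX:EAX masked by XCR0.
       Only the x87, SSE and YMM components are handled below (see the assertion). */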
8169 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8170 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8171
8172/** @todo figure out the exact protocol for the memory access. Currently we
8173 * just need this crap to work halfways to make it possible to test
8174 * AVX instructions. */
8175/** @todo figure out the XINUSE and XMODIFIED */
8176
8177 /*
8178 * Access the x87 memory state.
8179 */
8180 /* The x87+SSE state. */
8181 void *pvMem512;
8182 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8183 if (rcStrict != VINF_SUCCESS)
8184 return rcStrict;
8185 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8186 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8187
8188 /* The header. */
8189 PX86XSAVEHDR pHdr;
8190 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
8191 if (rcStrict != VINF_SUCCESS)
8192 return rcStrict;
8193
8194 /*
8195 * Store the X87 state.
8196 */
8197 if (fReqComponents & XSAVE_C_X87)
8198 {
8199 /* common for all formats */
8200 pDst->FCW = pSrc->FCW;
8201 pDst->FSW = pSrc->FSW;
8202 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8203 pDst->FOP = pSrc->FOP;
8204 pDst->FPUIP = pSrc->FPUIP;
8205 pDst->CS = pSrc->CS;
8206 pDst->FPUDP = pSrc->FPUDP;
8207 pDst->DS = pSrc->DS;
8208 if (enmEffOpSize == IEMMODE_64BIT)
8209 {
8210 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8211 pDst->Rsrvd1 = pSrc->Rsrvd1;
8212 pDst->Rsrvd2 = pSrc->Rsrvd2;
8213 pDst->au32RsrvdForSoftware[0] = 0;
8214 }
8215 else
8216 {
8217 pDst->Rsrvd1 = 0;
8218 pDst->Rsrvd2 = 0;
8219 pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
8220 }
8221 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8222 {
8223 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8224 * them for now... */
8225 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8226 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8227 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8228 pDst->aRegs[i].au32[3] = 0;
8229 }
8230
8231 }
8232
8233 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8234 {
8235 pDst->MXCSR = pSrc->MXCSR;
8236 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8237 }
8238
8239 if (fReqComponents & XSAVE_C_SSE)
8240 {
8241 /* XMM registers. */
8242 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8243 for (uint32_t i = 0; i < cXmmRegs; i++)
8244 pDst->aXMM[i] = pSrc->aXMM[i];
8245 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8246 * right? */
8247 }
8248
8249 /* Commit the x87 state bits. (probably wrong) */
8250 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8251 if (rcStrict != VINF_SUCCESS)
8252 return rcStrict;
8253
8254 /*
8255 * Store AVX state.
8256 */
8257 if (fReqComponents & XSAVE_C_YMM)
8258 {
8259 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8260 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8261 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8262 PX86XSAVEYMMHI pCompDst;
8263 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8264 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8265 if (rcStrict != VINF_SUCCESS)
8266 return rcStrict;
8267
8268 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8269 for (uint32_t i = 0; i < cXmmRegs; i++)
8270 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8271
8272 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8273 if (rcStrict != VINF_SUCCESS)
8274 return rcStrict;
8275 }
8276
8277 /*
8278 * Update the header.
8279 */
8280 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8281 | (fReqComponents & fXInUse);
8282
8283 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
8284 if (rcStrict != VINF_SUCCESS)
8285 return rcStrict;
8286
8287 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8288 return VINF_SUCCESS;
8289}
8290
8291
8292/**
8293 * Implements 'XRSTOR'.
8294 *
8295 * @param iEffSeg The effective segment.
8296 * @param GCPtrEff The address of the image.
8297 * @param enmEffOpSize The operand size (only REX.W really matters).
8298 */
8299IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8300{
8301 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8302
8303 /*
8304 * Raise exceptions.
8305 */
8306 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8307 return iemRaiseUndefinedOpcode(pVCpu);
8308 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8309 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8310 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8311 {
8312 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
8313 return iemRaiseUndefinedOpcode(pVCpu);
8314 }
8315 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8316 return iemRaiseDeviceNotAvailable(pVCpu);
8317 if (GCPtrEff & 63)
8318 {
8319 /** @todo CPU/VM detection possible! \#AC might not be signalled for
8320 * all/any misalignment sizes; Intel says it's an implementation detail. */
8321 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8322 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8323 && pVCpu->iem.s.uCpl == 3)
8324 return iemRaiseAlignmentCheckException(pVCpu);
8325 return iemRaiseGeneralProtectionFault0(pVCpu);
8326 }
8327
8328/** @todo figure out the exact protocol for the memory access. Currently we
8329 * just need this crap to work halfway to make it possible to test
8330 * AVX instructions. */
8331/** @todo figure out the XINUSE and XMODIFIED */
8332
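    /* Rough restore protocol (standard format): a component named in both
       EDX:EAX and XCR0 is loaded from the image when its XSTATE_BV bit is set
       and reset to its initial configuration when the bit is clear.  This is a
       best-effort approximation, see the todos above. */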
8333 /*
8334 * Access the x87 memory state.
8335 */
8336 /* The x87+SSE state. */
8337 void *pvMem512;
8338 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
8339 if (rcStrict != VINF_SUCCESS)
8340 return rcStrict;
8341 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8342 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8343
8344 /*
8345 * Calc the requested mask
8346 */
8347 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->Hdr;
8348 PCX86XSAVEHDR pHdrSrc;
8349 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
8350 if (rcStrict != VINF_SUCCESS)
8351 return rcStrict;
8352
8353 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8354 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8355 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8356 uint64_t const fRstorMask = pHdrSrc->bmXState;
8357 uint64_t const fCompMask = pHdrSrc->bmXComp;
8358
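    /* Bit 63 of XCOMP_BV (XSAVE_C_X) flags the compacted format; only the
       standard format is handled here, so bail out on compacted images. */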
8359 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8360
8361 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8362
8363 /* We won't need this any longer. */
8364 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
8365 if (rcStrict != VINF_SUCCESS)
8366 return rcStrict;
8367
8368 /*
8369 * Restore the x87 state.
8370 */
8371 if (fReqComponents & XSAVE_C_X87)
8372 {
8373 if (fRstorMask & XSAVE_C_X87)
8374 {
8375 pDst->FCW = pSrc->FCW;
8376 pDst->FSW = pSrc->FSW;
8377 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8378 pDst->FOP = pSrc->FOP;
8379 pDst->FPUIP = pSrc->FPUIP;
8380 pDst->CS = pSrc->CS;
8381 pDst->FPUDP = pSrc->FPUDP;
8382 pDst->DS = pSrc->DS;
8383 if (enmEffOpSize == IEMMODE_64BIT)
8384 {
8385 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8386 pDst->Rsrvd1 = pSrc->Rsrvd1;
8387 pDst->Rsrvd2 = pSrc->Rsrvd2;
8388 }
8389 else
8390 {
8391 pDst->Rsrvd1 = 0;
8392 pDst->Rsrvd2 = 0;
8393 }
8394 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8395 {
8396 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8397 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8398 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8399 pDst->aRegs[i].au32[3] = 0;
8400 }
8401 }
8402 else
8403 {
8404 pDst->FCW = 0x37f;
8405 pDst->FSW = 0;
8406 pDst->FTW = 0x00; /* 0 - empty. */
8407 pDst->FPUDP = 0;
8408 pDst->DS = 0; //??
8409 pDst->Rsrvd2= 0;
8410 pDst->FPUIP = 0;
8411 pDst->CS = 0; //??
8412 pDst->Rsrvd1= 0;
8413 pDst->FOP = 0;
8414 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8415 {
8416 pDst->aRegs[i].au32[0] = 0;
8417 pDst->aRegs[i].au32[1] = 0;
8418 pDst->aRegs[i].au32[2] = 0;
8419 pDst->aRegs[i].au32[3] = 0;
8420 }
8421 }
8422 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
8423 }
8424
8425 /* MXCSR */
8426 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8427 {
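        /* 0x1f80 is the architectural reset value of MXCSR (all exceptions masked). */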
8428 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
8429 pDst->MXCSR = pSrc->MXCSR;
8430 else
8431 pDst->MXCSR = 0x1f80;
8432 }
8433
8434 /* XMM registers. */
8435 if (fReqComponents & XSAVE_C_SSE)
8436 {
8437 if (fRstorMask & XSAVE_C_SSE)
8438 {
8439 for (uint32_t i = 0; i < cXmmRegs; i++)
8440 pDst->aXMM[i] = pSrc->aXMM[i];
8441 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8442 * right? */
8443 }
8444 else
8445 {
8446 for (uint32_t i = 0; i < cXmmRegs; i++)
8447 {
8448 pDst->aXMM[i].au64[0] = 0;
8449 pDst->aXMM[i].au64[1] = 0;
8450 }
8451 }
8452 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
8453 }
8454
8455 /* Unmap the x87 state bits (so we don't run out of mappings). */
8456 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8457 if (rcStrict != VINF_SUCCESS)
8458 return rcStrict;
8459
8460 /*
8461 * Restore AVX state.
8462 */
8463 if (fReqComponents & XSAVE_C_YMM)
8464 {
8465 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8466 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
8467
8468 if (fRstorMask & XSAVE_C_YMM)
8469 {
8470 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8471 PCX86XSAVEYMMHI pCompSrc;
8472 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
8473 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
8474 if (rcStrict != VINF_SUCCESS)
8475 return rcStrict;
8476
8477 for (uint32_t i = 0; i < cXmmRegs; i++)
8478 {
8479 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
8480 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
8481 }
8482
8483 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
8484 if (rcStrict != VINF_SUCCESS)
8485 return rcStrict;
8486 }
8487 else
8488 {
8489 for (uint32_t i = 0; i < cXmmRegs; i++)
8490 {
8491 pCompDst->aYmmHi[i].au64[0] = 0;
8492 pCompDst->aYmmHi[i].au64[1] = 0;
8493 }
8494 }
8495 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
8496 }
8497
8498 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8499 return VINF_SUCCESS;
8500}
8501
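/* Worked example of the request mask used by both XSAVE and XRSTOR above,
   assuming XCR0 = 7 (x87 | SSE | YMM):

       EDX:EAX = 0x0000000000000006  ->  fReqComponents = 6 (SSE | YMM)
       EDX:EAX = 0xffffffffffffffff  ->  fReqComponents = 7 (clipped to XCR0)

   i.e. fReqComponents = RT_MAKE_U64(eax, edx) & aXcr[0], exactly as computed
   in the two functions. */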
8502
8503
8504
8505/**
8506 * Implements 'STMXCSR'.
8507 * @param iEffSeg The effective segment register for @a GCPtrEff.
8508 * @param GCPtrEff The address of the image.
8509 */
8510IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8511{
8512 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8513
8514 /*
8515 * Raise exceptions.
8516 */
8517 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8518 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
8519 {
8520 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
8521 {
8522 /*
8523 * Do the job.
8524 */
8525 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR);
8526 if (rcStrict == VINF_SUCCESS)
8527 {
8528 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8529 return VINF_SUCCESS;
8530 }
8531 return rcStrict;
8532 }
8533 return iemRaiseDeviceNotAvailable(pVCpu);
8534 }
8535 return iemRaiseUndefinedOpcode(pVCpu);
8536}
8537
8538
8539/**
8540 * Implements 'VSTMXCSR'.
8541 * @param iEffSeg The effective segment register for @a GCPtrEff.
8542 * @param GCPtrEff The address of the image.
8543 */
8544IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8545{
8546 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
8547
8548 /*
8549 * Raise exceptions.
8550 */
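    /* On Intel, VSTMXCSR needs CR4.OSXSAVE plus SSE and YMM enabled in XCR0;
       the AMD Jaguar behaviour noted below only checks CR0.EM instead of XCR0
       (observed behaviour rather than a documented contract). */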
8551 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
8552 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
8553 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
8554 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8555 {
8556 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
8557 {
8558 /*
8559 * Do the job.
8560 */
8561 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR);
8562 if (rcStrict == VINF_SUCCESS)
8563 {
8564 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8565 return VINF_SUCCESS;
8566 }
8567 return rcStrict;
8568 }
8569 return iemRaiseDeviceNotAvailable(pVCpu);
8570 }
8571 return iemRaiseUndefinedOpcode(pVCpu);
8572}
8573
8574
8575/**
8576 * Implements 'LDMXCSR'.
8577 * @param iEffSeg The effective segment register for @a GCPtrEff.
8578 * @param GCPtrEff The address of the image.
8579 */
8580IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8581{
8582 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8583
8584 /*
8585 * Raise exceptions.
8586 */
8587 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
8588 * happen after or before \#UD and \#EM? */
8589 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
8590 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
8591 {
8592 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
8593 {
8594 /*
8595 * Do the job.
8596 */
8597 uint32_t fNewMxCsr;
8598 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
8599 if (rcStrict == VINF_SUCCESS)
8600 {
8601 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8602 if (!(fNewMxCsr & ~fMxCsrMask))
8603 {
8604 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr;
8605 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8606 return VINF_SUCCESS;
8607 }
8608 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
8609 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
8610 return iemRaiseGeneralProtectionFault0(pVCpu);
8611 }
8612 return rcStrict;
8613 }
8614 return iemRaiseDeviceNotAvailable(pVCpu);
8615 }
8616 return iemRaiseUndefinedOpcode(pVCpu);
8617}
8618
8619
8620/**
8621 * Common routine for fnstenv and fnsave.
8622 *
8623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8624 * @param enmEffOpSize The effective operand size.
8625 * @param uPtr Where to store the state.
8626 */
8627static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
8628{
8629 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
8630 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
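    /* The 16-bit environment image is 7 words (14 bytes) and the 32-bit one is
       7 dwords (28 bytes); in real/V86 mode the FPUIP/FPUDP fields carry linear
       addresses split over two fields instead of selector:offset pairs. */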
8631 if (enmEffOpSize == IEMMODE_16BIT)
8632 {
8633 uPtr.pu16[0] = pSrcX87->FCW;
8634 uPtr.pu16[1] = pSrcX87->FSW;
8635 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
8636 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8637 {
8638 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
8639 * protected mode or long mode and we save it in real mode? And vice
8640 * versa? And with 32-bit operand size? I think CPU is storing the
8641 * effective address ((CS << 4) + IP) in the offset register and not
8642 * doing any address calculations here. */
8643 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
8644 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
8645 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
8646 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
8647 }
8648 else
8649 {
8650 uPtr.pu16[3] = pSrcX87->FPUIP;
8651 uPtr.pu16[4] = pSrcX87->CS;
8652 uPtr.pu16[5] = pSrcX87->FPUDP;
8653 uPtr.pu16[6] = pSrcX87->DS;
8654 }
8655 }
8656 else
8657 {
8658 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
8659 uPtr.pu16[0*2] = pSrcX87->FCW;
8660 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
8661 uPtr.pu16[1*2] = pSrcX87->FSW;
8662 uPtr.pu16[1*2+1] = 0xffff;
8663 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
8664 uPtr.pu16[2*2+1] = 0xffff;
8665 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8666 {
8667 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
8668 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
8669 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
8670 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
8671 }
8672 else
8673 {
8674 uPtr.pu32[3] = pSrcX87->FPUIP;
8675 uPtr.pu16[4*2] = pSrcX87->CS;
8676 uPtr.pu16[4*2+1] = pSrcX87->FOP;
8677 uPtr.pu32[5] = pSrcX87->FPUDP;
8678 uPtr.pu16[6*2] = pSrcX87->DS;
8679 uPtr.pu16[6*2+1] = 0xffff;
8680 }
8681 }
8682}
8683
8684
8685/**
8686 * Common routine for fldenv and frstor.
8687 *
8688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8689 * @param enmEffOpSize The effective operand size.
8690 * @param uPtr Where to load the state from.
8691 */
8692static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
8693{
8694 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
8695 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8696 if (enmEffOpSize == IEMMODE_16BIT)
8697 {
8698 pDstX87->FCW = uPtr.pu16[0];
8699 pDstX87->FSW = uPtr.pu16[1];
8700 pDstX87->FTW = uPtr.pu16[2];
8701 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8702 {
8703 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
8704 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
8705 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
8706 pDstX87->CS = 0;
8707 pDstX87->Rsrvd1= 0;
8708 pDstX87->DS = 0;
8709 pDstX87->Rsrvd2= 0;
8710 }
8711 else
8712 {
8713 pDstX87->FPUIP = uPtr.pu16[3];
8714 pDstX87->CS = uPtr.pu16[4];
8715 pDstX87->Rsrvd1= 0;
8716 pDstX87->FPUDP = uPtr.pu16[5];
8717 pDstX87->DS = uPtr.pu16[6];
8718 pDstX87->Rsrvd2= 0;
8719 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
8720 }
8721 }
8722 else
8723 {
8724 pDstX87->FCW = uPtr.pu16[0*2];
8725 pDstX87->FSW = uPtr.pu16[1*2];
8726 pDstX87->FTW = uPtr.pu16[2*2];
8727 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
8728 {
8729 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
8730 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
8731 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
8732 pDstX87->CS = 0;
8733 pDstX87->Rsrvd1= 0;
8734 pDstX87->DS = 0;
8735 pDstX87->Rsrvd2= 0;
8736 }
8737 else
8738 {
8739 pDstX87->FPUIP = uPtr.pu32[3];
8740 pDstX87->CS = uPtr.pu16[4*2];
8741 pDstX87->Rsrvd1= 0;
8742 pDstX87->FOP = uPtr.pu16[4*2+1];
8743 pDstX87->FPUDP = uPtr.pu32[5];
8744 pDstX87->DS = uPtr.pu16[6*2];
8745 pDstX87->Rsrvd2= 0;
8746 }
8747 }
8748
8749 /* Make adjustments. */
8750 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
8751 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
8752 iemFpuRecalcExceptionStatus(pDstX87);
8753 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
8754 * exceptions are pending after loading the saved state? */
8755}
8756
8757
8758/**
8759 * Implements 'FNSTENV'.
8760 *
8761 * @param enmEffOpSize The operand size (selects the 16-bit or 32-bit image format).
8762 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
8763 * @param GCPtrEffDst The address of the image.
8764 */
8765IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8766{
8767 RTPTRUNION uPtr;
8768 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8769 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8770 if (rcStrict != VINF_SUCCESS)
8771 return rcStrict;
8772
8773 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
8774
8775 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8776 if (rcStrict != VINF_SUCCESS)
8777 return rcStrict;
8778
8779 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8780 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8781 return VINF_SUCCESS;
8782}
8783
8784
8785/**
8786 * Implements 'FNSAVE'.
8787 * @param enmEffOpSize The operand size.
8788 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
8789 * @param GCPtrEffDst The address of the image.
8790 */
8791IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
8792{
8793 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
8794
8795 RTPTRUNION uPtr;
8796 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8797 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8798 if (rcStrict != VINF_SUCCESS)
8799 return rcStrict;
8800
8801 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8802 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
8803 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
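    /* ST(0)..ST(7) follow the environment as eight 80-bit (10 byte) values,
       which gives the 94/108 byte totals mapped above (14 + 80 and 28 + 80). */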
8804 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8805 {
8806 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
8807 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
8808 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
8809 }
8810
8811 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8812 if (rcStrict != VINF_SUCCESS)
8813 return rcStrict;
8814
8815 /*
8816 * Re-initialize the FPU context.
8817 */
8818 pFpuCtx->FCW = 0x37f;
8819 pFpuCtx->FSW = 0;
8820 pFpuCtx->FTW = 0x00; /* 0 - empty */
8821 pFpuCtx->FPUDP = 0;
8822 pFpuCtx->DS = 0;
8823 pFpuCtx->Rsrvd2= 0;
8824 pFpuCtx->FPUIP = 0;
8825 pFpuCtx->CS = 0;
8826 pFpuCtx->Rsrvd1= 0;
8827 pFpuCtx->FOP = 0;
8828
8829 iemHlpUsedFpu(pVCpu);
8830 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8831 return VINF_SUCCESS;
8832}
8833
8834
8835
8836/**
8837 * Implements 'FLDENV'.
8838 *
8839 * @param enmEffOpSize The operand size (selects the 16-bit or 32-bit image format).
8840 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
8841 * @param GCPtrEffSrc The address of the image.
8842 */
8843IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8844{
8845 RTCPTRUNION uPtr;
8846 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
8847 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8848 if (rcStrict != VINF_SUCCESS)
8849 return rcStrict;
8850
8851 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
8852
8853 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8854 if (rcStrict != VINF_SUCCESS)
8855 return rcStrict;
8856
8857 iemHlpUsedFpu(pVCpu);
8858 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8859 return VINF_SUCCESS;
8860}
8861
8862
8863/**
8864 * Implements 'FRSTOR'.
8865 * @param enmEffOpSize The operand size.
8866 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
8867 * @param GCPtrEffSrc The address of the image.
8868 */
8869IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
8870{
8871 RTCPTRUNION uPtr;
8872 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
8873 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
8874 if (rcStrict != VINF_SUCCESS)
8875 return rcStrict;
8876
8877 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8878 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
8879 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
8880 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
8881 {
8882 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
8883 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
8884 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
8885 pFpuCtx->aRegs[i].au32[3] = 0;
8886 }
8887
8888 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
8889 if (rcStrict != VINF_SUCCESS)
8890 return rcStrict;
8891
8892 iemHlpUsedFpu(pVCpu);
8893 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8894 return VINF_SUCCESS;
8895}
8896
8897
8898/**
8899 * Implements 'FLDCW'.
8900 *
8901 * @param u16Fcw The new FCW.
8902 */
8903IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
8904{
8905 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
8906
8907 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
8908 /** @todo Testcase: Try to see what happens when trying to set undefined bits
8909 * (other than 6 and 7). Currently ignoring them. */
8910 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
8911 * according to FSW. (This is what is currently implemented.) */
8912 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8913 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
8914 iemFpuRecalcExceptionStatus(pFpuCtx);
8915
8916 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
8917 iemHlpUsedFpu(pVCpu);
8918 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8919 return VINF_SUCCESS;
8920}
8921
8922
8923
8924/**
8925 * Implements the underflow case of fxch.
8926 *
8927 * @param iStReg The other stack register.
8928 */
8929IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
8930{
8931 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
8932
8933 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8934 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
8935 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
8936 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
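    /* At least one of the two slots is tagged empty here, which is what makes
       this the underflow path; the abridged FTW uses one bit per register, set
       when the register holds a value. */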
8937
8938 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
8939 * registers are read as QNaN and then exchanged. This could be
8940 * wrong... */
8941 if (pFpuCtx->FCW & X86_FCW_IM)
8942 {
8943 if (RT_BIT(iReg1) & pFpuCtx->FTW)
8944 {
8945 if (RT_BIT(iReg2) & pFpuCtx->FTW)
8946 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8947 else
8948 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
8949 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
8950 }
8951 else
8952 {
8953 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
8954 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8955 }
8956 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8957 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8958 }
8959 else
8960 {
8961 /* raise underflow exception, don't change anything. */
8962 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
8963 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8964 }
8965
8966 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8967 iemHlpUsedFpu(pVCpu);
8968 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8969 return VINF_SUCCESS;
8970}
8971
8972
8973/**
8974 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
8975 * @param iStReg The other stack register.
8976 * @param pfnAImpl Pointer to the assembly comparison worker.
 * @param fPop Whether to pop the stack register afterwards.
8977 */
8978IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
8979{
8980 Assert(iStReg < 8);
8981 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
8982
8983 /*
8984 * Raise exceptions.
8985 */
8986 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8987 return iemRaiseDeviceNotAvailable(pVCpu);
8988
8989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
8990 uint16_t u16Fsw = pFpuCtx->FSW;
8991 if (u16Fsw & X86_FSW_ES)
8992 return iemRaiseMathFault(pVCpu);
8993
8994 /*
8995 * Check if any of the register accesses causes #SF + #IA.
8996 */
8997 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
8998 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
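    /* The comparison proper only runs when both ST(0) and ST(iStReg) hold
       values; otherwise this is a stack underflow, handled below. */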
8999 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9000 {
9001 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9003
9004 pFpuCtx->FSW &= ~X86_FSW_C1;
9005 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9006 if ( !(u16Fsw & X86_FSW_IE)
9007 || (pFpuCtx->FCW & X86_FCW_IM) )
9008 {
9009 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9010 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9011 }
9012 }
9013 else if (pFpuCtx->FCW & X86_FCW_IM)
9014 {
9015 /* Masked underflow. */
9016 pFpuCtx->FSW &= ~X86_FSW_C1;
9017 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9018 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9019 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9020 }
9021 else
9022 {
9023 /* Raise underflow - don't touch EFLAGS or TOP. */
9024 pFpuCtx->FSW &= ~X86_FSW_C1;
9025 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9026 fPop = false;
9027 }
9028
9029 /*
9030 * Pop if necessary.
9031 */
9032 if (fPop)
9033 {
9034 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9035 pFpuCtx->FSW &= X86_FSW_TOP_MASK;
9036 pFpuCtx->FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
9037 }
9038
9039 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9040 iemHlpUsedFpu(pVCpu);
9041 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9042 return VINF_SUCCESS;
9043}
9044
9045/** @} */
9046