VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp@ 100052

Last change on this file was r100052, checked in by vboxsync on 2023-06-02:

VMM/IEM: Refactored the enmCpuMode, uCpl, fBypassHandlers, fDisregardLock and fPendingInstruction* IEMCPU members into a single fExec member and associated IEM_F_XXX flag defines. Added more flags needed for recompiled execution. The fExec value is maintained as code is executed, so it does not need to be recalculated in the instruction loops. bugref:10369

1/* $Id: IEMAllCImpl.cpp 100052 2023-06-02 14:49:14Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#define IEM_WITH_OPAQUE_DECODER_STATE
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/cpum.h>
37#include <VBox/vmm/apic.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/nem.h>
44#include <VBox/vmm/gim.h>
45#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
46# include <VBox/vmm/em.h>
47# include <VBox/vmm/hm_svm.h>
48#endif
49#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
50# include <VBox/vmm/hmvmxinline.h>
51#endif
52#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
53# include <VBox/vmm/cpuidcall.h>
54#endif
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/dbgftrace.h>
58#include "IEMInternal.h"
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/**
76 * Flushes the prefetch buffer, light version.
77 * @todo The \#if conditions here must match the ones in iemOpcodeFlushLight().
78 */
79#ifndef IEM_WITH_CODE_TLB
80# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) iemOpcodeFlushLight(a_pVCpu, a_cbInstr)
81#else
82# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { } while (0)
83#endif
84
85/**
86 * Flushes the prefetch buffer, heavy version.
87 * @todo The \#if conditions here must match the ones in iemOpcodeFlushHeavy().
88 */
89#if !defined(IEM_WITH_CODE_TLB) || 1
90# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) iemOpcodeFlushHeavy(a_pVCpu, a_cbInstr)
91#else
92# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { } while (0)
93#endif
94
95
96
97/** @name Misc Helpers
98 * @{
99 */
100
101
102/**
103 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
104 *
105 * @returns Strict VBox status code.
106 *
107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
108 * @param u16Port The port number.
109 * @param cbOperand The operand size.
110 */
111static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
112{
113 /* The TSS bits we're interested in are the same on 386 and AMD64. */
114 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
115 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
116 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
117 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
118
119 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
120
121 /*
122 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
123 */
124 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
125 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
126 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
127 {
128 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
129 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
130 return iemRaiseGeneralProtectionFault0(pVCpu);
131 }
132
133 /*
134 * Read the bitmap offset (may #PF).
135 */
136 uint16_t offBitmap;
137 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
138 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
139 if (rcStrict != VINF_SUCCESS)
140 {
141 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
142 return rcStrict;
143 }
144
145 /*
146 * The bit range covered is u16Port to (u16Port + cbOperand - 1); however, Intel
147 * describes the CPU as actually reading two bytes regardless of whether the
148 * bit range crosses a byte boundary. Thus the + 1 in the test below.
149 */
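 /* Example (illustrative): a word access to port 0x3F8 covers bits 0x3F8 and 0x3F9,
 * so offFirstBit = 0x3F8 / 8 + offBitmap = 0x7F + offBitmap, fPortMask below is
 * (1 << 2) - 1 = 3, and the fetched word is shifted right by (0x3F8 & 7) = 0 bits. */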
150 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
151 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
152 * for instance, sizeof(X86TSS32). */
153 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
154 {
155 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
156 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
157 return iemRaiseGeneralProtectionFault0(pVCpu);
158 }
159
160 /*
161 * Read the necessary bits.
162 */
163 /** @todo Test the assertion in the Intel manual that the CPU reads two
164 * bytes. The question is how this works w.r.t. \#PF and \#GP on the
165 * 2nd byte when it's not required. */
166 uint16_t bmBytes = UINT16_MAX;
167 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
168 if (rcStrict != VINF_SUCCESS)
169 {
170 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
171 return rcStrict;
172 }
173
174 /*
175 * Perform the check.
176 */
177 uint16_t fPortMask = (1 << cbOperand) - 1;
178 bmBytes >>= (u16Port & 7);
179 if (bmBytes & fPortMask)
180 {
181 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
182 u16Port, cbOperand, bmBytes, fPortMask));
183 return iemRaiseGeneralProtectionFault0(pVCpu);
184 }
185
186 return VINF_SUCCESS;
187}
188
189
190/**
191 * Checks if we are allowed to access the given I/O port, raising the
192 * appropriate exceptions if we aren't (or if the I/O bitmap is not
193 * accessible).
194 *
195 * @returns Strict VBox status code.
196 *
197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
198 * @param u16Port The port number.
199 * @param cbOperand The operand size.
200 */
201DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
202{
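 /* The I/O permission bitmap only needs consulting in protected mode when CPL > IOPL
 * or in V8086 mode; in real mode, or at sufficient privilege, the access is allowed outright. */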
203 X86EFLAGS Efl;
204 Efl.u = IEMMISC_GET_EFL(pVCpu);
205 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
206 && ( IEM_GET_CPL(pVCpu) > Efl.Bits.u2IOPL
207 || Efl.Bits.u1VM) )
208 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
209 return VINF_SUCCESS;
210}
211
212
213#if 0
214/**
215 * Calculates the parity bit.
216 *
217 * @returns true if the bit is set, false if not.
218 * @param u8Result The least significant byte of the result.
219 */
220static bool iemHlpCalcParityFlag(uint8_t u8Result)
221{
222 /*
223 * Parity is set if the number of set bits in the least significant byte
224 * of the result is even.
225 */
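 /* Example (illustrative): 0xA5 = 1010 0101b has four set bits, so the parity flag would be set. */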
226 uint8_t cBits;
227 cBits = u8Result & 1; /* 0 */
228 u8Result >>= 1;
229 cBits += u8Result & 1;
230 u8Result >>= 1;
231 cBits += u8Result & 1;
232 u8Result >>= 1;
233 cBits += u8Result & 1;
234 u8Result >>= 1;
235 cBits += u8Result & 1; /* 4 */
236 u8Result >>= 1;
237 cBits += u8Result & 1;
238 u8Result >>= 1;
239 cBits += u8Result & 1;
240 u8Result >>= 1;
241 cBits += u8Result & 1;
242 return !(cBits & 1);
243}
244#endif /* not used */
245
246
247/**
248 * Updates the specified flags according to an 8-bit result.
249 *
250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
251 * @param u8Result The result to set the flags according to.
252 * @param fToUpdate The flags to update.
253 * @param fUndefined The flags that are specified as undefined.
254 */
255static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
256{
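 /* TEST of the result against itself recomputes SF, ZF and PF (clearing CF and OF);
 * only the bits in fToUpdate and fUndefined are then copied into the guest EFLAGS,
 * all other flags keep their current values. */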
257 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
258 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
259 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
260 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
261}
262
263
264/**
265 * Updates the specified flags according to a 16-bit result.
266 *
267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
268 * @param u16Result The result to set the flags according to.
269 * @param fToUpdate The flags to update.
270 * @param fUndefined The flags that are specified as undefined.
271 */
272static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
273{
274 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
275 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
276 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
277 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
278}
279
280
281/**
282 * Helper used by iret; nulls a data or non-conforming code segment register
 * that is no longer accessible at the new CPL.
283 *
284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
285 * @param uCpl The new CPL.
286 * @param pSReg Pointer to the segment register.
287 */
288static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
289{
290 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
291 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
292
293 if ( uCpl > pSReg->Attr.n.u2Dpl
294 && pSReg->Attr.n.u1DescType /* code or data, not system */
295 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
296 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
297 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
298}
299
300
301/**
302 * Indicates that we have modified the FPU state.
303 *
304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
305 */
306DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
307{
308 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
309}
310
311/** @} */
312
313/** @name C Implementations
314 * @{
315 */
316
317/**
318 * Implements a 16-bit popa.
319 */
320IEM_CIMPL_DEF_0(iemCImpl_popa_16)
321{
322 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
323 RTGCPTR GCPtrLast = GCPtrStart + 15;
324 VBOXSTRICTRC rcStrict;
325
326 /*
327 * The docs are a bit hard to comprehend here, but it looks like we wrap
328 * around in real mode as long as none of the individual "popa" crosses the
329 * end of the stack segment. In protected mode we check the whole access
330 * in one go. For efficiency, only do the word-by-word thing if we're in
331 * danger of wrapping around.
332 */
333 /** @todo do popa boundary / wrap-around checks. */
334 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
335 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
336 {
337 /* word-by-word */
338 RTUINT64U TmpRsp;
339 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
340 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
341 if (rcStrict == VINF_SUCCESS)
342 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
343 if (rcStrict == VINF_SUCCESS)
344 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
345 if (rcStrict == VINF_SUCCESS)
346 {
347 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
348 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
349 }
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
356 if (rcStrict == VINF_SUCCESS)
357 {
358 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
359 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
360 }
361 }
362 else
363 {
364 uint16_t const *pa16Mem = NULL;
365 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
366 if (rcStrict == VINF_SUCCESS)
367 {
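 /* Note: PUSHA stores AX first (at the highest address) and DI last (at the lowest),
 * so the mapped block holds the registers in reverse order, hence the
 * 7 - X86_GREG_xXX indexing below. */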
368 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
369 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
370 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
371 /* skip sp */
372 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
373 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
374 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
375 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
376 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
377 if (rcStrict == VINF_SUCCESS)
378 {
379 iemRegAddToRsp(pVCpu, 16);
380 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
381 }
382 }
383 }
384 return rcStrict;
385}
386
387
388/**
389 * Implements a 32-bit popa.
390 */
391IEM_CIMPL_DEF_0(iemCImpl_popa_32)
392{
393 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
394 RTGCPTR GCPtrLast = GCPtrStart + 31;
395 VBOXSTRICTRC rcStrict;
396
397 /*
398 * The docs are a bit hard to comprehend here, but it looks like we wrap
399 * around in real mode as long as none of the individual "popa" crosses the
400 * end of the stack segment. In protected mode we check the whole access
401 * in one go. For efficiency, only do the word-by-word thing if we're in
402 * danger of wrapping around.
403 */
404 /** @todo do popa boundary / wrap-around checks. */
405 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
406 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
407 {
408 /* word-by-word */
409 RTUINT64U TmpRsp;
410 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
411 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 {
418 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
419 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
420 }
421 if (rcStrict == VINF_SUCCESS)
422 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
423 if (rcStrict == VINF_SUCCESS)
424 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
425 if (rcStrict == VINF_SUCCESS)
426 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
427 if (rcStrict == VINF_SUCCESS)
428 {
429#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
430 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
431 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
432 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
433 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
434 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
435 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
436 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
437#endif
438 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
439 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
440 }
441 }
442 else
443 {
444 uint32_t const *pa32Mem;
445 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
446 if (rcStrict == VINF_SUCCESS)
447 {
448 pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
449 pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
450 pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
451 /* skip esp */
452 pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
453 pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
454 pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
455 pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
456 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
457 if (rcStrict == VINF_SUCCESS)
458 {
459 iemRegAddToRsp(pVCpu, 32);
460 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
461 }
462 }
463 }
464 return rcStrict;
465}
466
467
468/**
469 * Implements a 16-bit pusha.
470 */
471IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
472{
473 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
474 RTGCPTR GCPtrBottom = GCPtrTop - 15;
475 VBOXSTRICTRC rcStrict;
476
477 /*
478 * The docs are a bit hard to comprehend here, but it looks like we wrap
479 * around in real mode as long as none of the individual pushes crosses the
480 * end of the stack segment. In protected mode we check the whole access
481 * in one go. For efficiency, only do the word-by-word thing if we're in
482 * danger of wrapping around.
483 */
484 /** @todo do pusha boundary / wrap-around checks. */
485 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
486 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
487 {
488 /* word-by-word */
489 RTUINT64U TmpRsp;
490 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
491 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
496 if (rcStrict == VINF_SUCCESS)
497 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
498 if (rcStrict == VINF_SUCCESS)
499 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
500 if (rcStrict == VINF_SUCCESS)
501 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
502 if (rcStrict == VINF_SUCCESS)
503 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
504 if (rcStrict == VINF_SUCCESS)
505 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
506 if (rcStrict == VINF_SUCCESS)
507 {
508 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
509 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
510 }
511 }
512 else
513 {
514 GCPtrBottom--;
515 uint16_t *pa16Mem = NULL;
516 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
517 if (rcStrict == VINF_SUCCESS)
518 {
519 pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
520 pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
521 pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
522 pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
523 pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
524 pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
525 pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
526 pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
527 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
528 if (rcStrict == VINF_SUCCESS)
529 {
530 iemRegSubFromRsp(pVCpu, 16);
531 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
532 }
533 }
534 }
535 return rcStrict;
536}
537
538
539/**
540 * Implements a 32-bit pusha.
541 */
542IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
543{
544 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
545 RTGCPTR GCPtrBottom = GCPtrTop - 31;
546 VBOXSTRICTRC rcStrict;
547
548 /*
549 * The docs are a bit hard to comprehend here, but it looks like we wrap
550 * around in real mode as long as none of the individual "pusha" crosses the
551 * end of the stack segment. In protected mode we check the whole access
552 * in one go. For efficiency, only do the word-by-word thing if we're in
553 * danger of wrapping around.
554 */
555 /** @todo do pusha boundary / wrap-around checks. */
556 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
557 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
558 {
559 /* word-by-word */
560 RTUINT64U TmpRsp;
561 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
562 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
563 if (rcStrict == VINF_SUCCESS)
564 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
565 if (rcStrict == VINF_SUCCESS)
566 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
567 if (rcStrict == VINF_SUCCESS)
568 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
569 if (rcStrict == VINF_SUCCESS)
570 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
571 if (rcStrict == VINF_SUCCESS)
572 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
573 if (rcStrict == VINF_SUCCESS)
574 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
575 if (rcStrict == VINF_SUCCESS)
576 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
577 if (rcStrict == VINF_SUCCESS)
578 {
579 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
580 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
581 }
582 }
583 else
584 {
585 GCPtrBottom--;
586 uint32_t *pa32Mem;
587 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
588 if (rcStrict == VINF_SUCCESS)
589 {
590 pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
591 pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
592 pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
593 pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
594 pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
595 pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
596 pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
597 pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
598 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
599 if (rcStrict == VINF_SUCCESS)
600 {
601 iemRegSubFromRsp(pVCpu, 32);
602 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
603 }
604 }
605 }
606 return rcStrict;
607}
608
609
610/**
611 * Implements pushf.
612 *
613 *
614 * @param enmEffOpSize The effective operand size.
615 */
616IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
617{
618 VBOXSTRICTRC rcStrict;
619
620 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
621 {
622 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
623 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
624 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
625 }
626
627 /*
628 * If we're in V8086 mode some care is required (which is why we're
629 * doing this in a C implementation).
630 */
631 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
632 if ( (fEfl & X86_EFL_VM)
633 && X86_EFL_GET_IOPL(fEfl) != 3 )
634 {
635 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
636 if ( enmEffOpSize != IEMMODE_16BIT
637 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
638 return iemRaiseGeneralProtectionFault0(pVCpu);
639 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
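 /* With CR4.VME set, the virtual interrupt flag (VIF, bit 19) is pushed in the IF position (bit 9). */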
640 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
641 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
642 }
643 else
644 {
645
646 /*
647 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
648 */
649 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
650
651 switch (enmEffOpSize)
652 {
653 case IEMMODE_16BIT:
654 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
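 /* The 8086, 80186 and V20/V30 push FLAGS with bits 15:12 set; see the matching note in iemCImpl_popf. */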
655 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
656 fEfl |= UINT16_C(0xf000);
657 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
658 break;
659 case IEMMODE_32BIT:
660 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
661 break;
662 case IEMMODE_64BIT:
663 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
664 break;
665 IEM_NOT_REACHED_DEFAULT_CASE_RET();
666 }
667 }
668
669 if (rcStrict == VINF_SUCCESS)
670 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
671 return rcStrict;
672}
673
674
675/**
676 * Implements popf.
677 *
678 * @param enmEffOpSize The effective operand size.
679 */
680IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
681{
682 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
683 VBOXSTRICTRC rcStrict;
684 uint32_t fEflNew;
685
686 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
687 {
688 Log2(("popf: Guest intercept -> #VMEXIT\n"));
689 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
690 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
691 }
692
693 /*
694 * V8086 is special as usual.
695 */
696 if (fEflOld & X86_EFL_VM)
697 {
698 /*
699 * Almost anything goes if IOPL is 3.
700 */
701 if (X86_EFL_GET_IOPL(fEflOld) == 3)
702 {
703 switch (enmEffOpSize)
704 {
705 case IEMMODE_16BIT:
706 {
707 uint16_t u16Value;
708 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
709 if (rcStrict != VINF_SUCCESS)
710 return rcStrict;
711 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
712 break;
713 }
714 case IEMMODE_32BIT:
715 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
716 if (rcStrict != VINF_SUCCESS)
717 return rcStrict;
718 break;
719 IEM_NOT_REACHED_DEFAULT_CASE_RET();
720 }
721
722 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
723 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
724 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
725 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
726 }
727 /*
728 * Interrupt flag virtualization with CR4.VME=1.
729 */
730 else if ( enmEffOpSize == IEMMODE_16BIT
731 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
732 {
733 uint16_t u16Value;
734 RTUINT64U TmpRsp;
735 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
736 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP
741 * or before? */
742 if ( ( (u16Value & X86_EFL_IF)
743 && (fEflOld & X86_EFL_VIP))
744 || (u16Value & X86_EFL_TF) )
745 return iemRaiseGeneralProtectionFault0(pVCpu);
746
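 /* The popped IF value is mirrored into VIF (bit 19); the real IF and IOPL are taken from the old EFLAGS below. */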
747 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
748 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
749 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
750 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
751
752 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
753 }
754 else
755 return iemRaiseGeneralProtectionFault0(pVCpu);
756
757 }
758 /*
759 * Not in V8086 mode.
760 */
761 else
762 {
763 /* Pop the flags. */
764 switch (enmEffOpSize)
765 {
766 case IEMMODE_16BIT:
767 {
768 uint16_t u16Value;
769 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
770 if (rcStrict != VINF_SUCCESS)
771 return rcStrict;
772 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
773
774 /*
775 * Ancient CPU adjustments:
776 * - 8086, 80186, V20/30:
777 * The fixed bits 15:12 are not kept correctly internally, mostly for
778 * practical reasons (masking below). We add them when pushing flags.
779 * - 80286:
780 * The NT and IOPL flags cannot be popped from real mode and are
781 * therefore always zero (since a 286 can never exit from PM and
782 * their initial value is zero). This changed on the 386 and can
783 * therefore be used to distinguish a 286 from a 386 CPU in real mode.
784 */
785 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
786 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
787 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
788 break;
789 }
790 case IEMMODE_32BIT:
791 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
792 if (rcStrict != VINF_SUCCESS)
793 return rcStrict;
794 break;
795 case IEMMODE_64BIT:
796 {
797 uint64_t u64Value;
798 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
799 if (rcStrict != VINF_SUCCESS)
800 return rcStrict;
801 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
802 break;
803 }
804 IEM_NOT_REACHED_DEFAULT_CASE_RET();
805 }
806
807 /* Merge them with the current flags. */
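 /* Three cases: at CPL 0 (or when IOPL/IF are unchanged) every POPF-able flag may be
 * updated, at CPL <= IOPL everything but IOPL may be updated, and otherwise neither
 * IOPL nor IF is changed. */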
808 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
809 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
810 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
811 || IEM_GET_CPL(pVCpu) == 0)
812 {
813 fEflNew &= fPopfBits;
814 fEflNew |= ~fPopfBits & fEflOld;
815 }
816 else if (IEM_GET_CPL(pVCpu) <= X86_EFL_GET_IOPL(fEflOld))
817 {
818 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
819 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
820 }
821 else
822 {
823 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
824 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
825 }
826 }
827
828 /*
829 * Commit the flags.
830 */
831 Assert(fEflNew & RT_BIT_32(1));
832 IEMMISC_SET_EFL(pVCpu, fEflNew);
833 return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
834}
835
836
837/**
838 * Implements an indirect call.
839 *
840 * @param uNewPC The new program counter (RIP) value (loaded from the
841 * operand).
842 */
843IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
844{
845 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
846 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
847 {
848 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
849 if (rcStrict == VINF_SUCCESS)
850 {
851 pVCpu->cpum.GstCtx.rip = uNewPC;
852 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
853 return iemRegFinishClearingRF(pVCpu);
854 }
855 return rcStrict;
856 }
857 return iemRaiseGeneralProtectionFault0(pVCpu);
858}
859
860
861/**
862 * Implements a 16-bit relative call.
863 *
864 * @param offDisp The displacement offset.
865 */
866IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
867{
868 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
869 uint16_t const uNewPC = uOldPC + offDisp;
870 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
871 {
872 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
873 if (rcStrict == VINF_SUCCESS)
874 {
875 pVCpu->cpum.GstCtx.rip = uNewPC;
876 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
877 return iemRegFinishClearingRF(pVCpu);
878 }
879 return rcStrict;
880 }
881 return iemRaiseGeneralProtectionFault0(pVCpu);
882}
883
884
885/**
886 * Implements a 32-bit indirect call.
887 *
888 * @param uNewPC The new program counter (RIP) value (loaded from the
889 * operand).
890 */
891IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
892{
893 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
894 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
895 {
896 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
897 if (rcStrict == VINF_SUCCESS)
898 {
899 pVCpu->cpum.GstCtx.rip = uNewPC;
900 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
901 return iemRegFinishClearingRF(pVCpu);
902 }
903 return rcStrict;
904 }
905 return iemRaiseGeneralProtectionFault0(pVCpu);
906}
907
908
909/**
910 * Implements a 32-bit relative call.
911 *
912 * @param offDisp The displacement offset.
913 */
914IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
915{
916 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
917 uint32_t const uNewPC = uOldPC + offDisp;
918 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
919 {
920 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
921 if (rcStrict == VINF_SUCCESS)
922 {
923 pVCpu->cpum.GstCtx.rip = uNewPC;
924 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
925 return iemRegFinishClearingRF(pVCpu);
926 }
927 return rcStrict;
928 }
929 return iemRaiseGeneralProtectionFault0(pVCpu);
930}
931
932
933/**
934 * Implements a 64-bit indirect call.
935 *
936 * @param uNewPC The new program counter (RIP) value (loaded from the
937 * operand).
938 */
939IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
940{
941 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
942 if (IEM_IS_CANONICAL(uNewPC))
943 {
944 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
945 if (rcStrict == VINF_SUCCESS)
946 {
947 pVCpu->cpum.GstCtx.rip = uNewPC;
948 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
949 return iemRegFinishClearingRF(pVCpu);
950 }
951 return rcStrict;
952 }
953 return iemRaiseGeneralProtectionFault0(pVCpu);
954}
955
956
957/**
958 * Implements a 64-bit relative call.
959 *
960 * @param offDisp The displacement offset.
961 */
962IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
963{
964 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
965 uint64_t const uNewPC = uOldPC + offDisp;
966 if (IEM_IS_CANONICAL(uNewPC))
967 {
968 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
969 if (rcStrict == VINF_SUCCESS)
970 {
971 pVCpu->cpum.GstCtx.rip = uNewPC;
972 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
973 return iemRegFinishClearingRF(pVCpu);
974 }
975 return rcStrict;
976 }
977 return iemRaiseNotCanonical(pVCpu);
978}
979
980
981/**
982 * Implements far jumps and calls thru task segments (TSS).
983 *
984 * @returns VBox strict status code.
985 * @param pVCpu The cross context virtual CPU structure of the
986 * calling thread.
987 * @param cbInstr The current instruction length.
988 * @param uSel The selector.
989 * @param enmBranch The kind of branching we're performing.
990 * @param enmEffOpSize The effective operand size.
991 * @param pDesc The descriptor corresponding to @a uSel. The type is
992 * task segment (TSS).
993 */
994static VBOXSTRICTRC iemCImpl_BranchTaskSegment(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
995 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
996{
997#ifndef IEM_IMPLEMENTS_TASKSWITCH
998 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
999#else
1000 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1001 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1002 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
1003 RT_NOREF_PV(enmEffOpSize);
1004 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1005
1006 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1007 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1008 {
1009 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1010 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1011 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1012 }
1013
1014 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1015 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1016 * checked here, need testcases. */
1017 if (!pDesc->Legacy.Gen.u1Present)
1018 {
1019 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1020 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1021 }
1022
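 /* For a task switch via JMP or CALL, the EIP saved in the outgoing task's TSS is the address of the next instruction. */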
1023 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1024 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1025 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1026#endif
1027}
1028
1029
1030/**
1031 * Implements far jumps and calls thru task gates.
1032 *
1033 * @returns VBox strict status code.
1034 * @param pVCpu The cross context virtual CPU structure of the
1035 * calling thread.
1036 * @param cbInstr The current instruction length.
1037 * @param uSel The selector.
1038 * @param enmBranch The kind of branching we're performing.
1039 * @param enmEffOpSize The effective operand size.
1040 * @param pDesc The descriptor corresponding to @a uSel. The type is
1041 * task gate.
1042 */
1043static VBOXSTRICTRC iemCImpl_BranchTaskGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1044 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1045{
1046#ifndef IEM_IMPLEMENTS_TASKSWITCH
1047 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1048#else
1049 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1050 RT_NOREF_PV(enmEffOpSize);
1051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1052
1053 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1054 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1055 {
1056 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1057 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1058 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1059 }
1060
1061 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1062 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1063 * checked here, need testcases. */
1064 if (!pDesc->Legacy.Gen.u1Present)
1065 {
1066 Log(("BranchTaskGate segment not present uSel=%04x -> #NP\n", uSel));
1067 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1068 }
1069
1070 /*
1071 * Fetch the new TSS descriptor from the GDT.
1072 */
1073 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1074 if (uSelTss & X86_SEL_LDT)
1075 {
1076 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1077 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1078 }
1079
1080 IEMSELDESC TssDesc;
1081 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1082 if (rcStrict != VINF_SUCCESS)
1083 return rcStrict;
1084
1085 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1086 {
1087 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1088 TssDesc.Legacy.Gate.u4Type));
1089 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1090 }
1091
1092 if (!TssDesc.Legacy.Gate.u1Present)
1093 {
1094 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1095 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1096 }
1097
1098 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1099 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1100 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1101#endif
1102}
1103
1104
1105/**
1106 * Implements far jumps and calls thru call gates.
1107 *
1108 * @returns VBox strict status code.
1109 * @param pVCpu The cross context virtual CPU structure of the
1110 * calling thread.
1111 * @param cbInstr The current instruction length.
1112 * @param uSel The selector.
1113 * @param enmBranch The kind of branching we're performing.
1114 * @param enmEffOpSize The effective operand size.
1115 * @param pDesc The descriptor corresponding to @a uSel. The type is
1116 * call gate.
1117 */
1118static VBOXSTRICTRC iemCImpl_BranchCallGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1119 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1120{
1121#define IEM_IMPLEMENTS_CALLGATE
1122#ifndef IEM_IMPLEMENTS_CALLGATE
1123 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1124#else
1125 RT_NOREF_PV(enmEffOpSize);
1126 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1127
1128 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1129 * inter-privilege calls and are much more complex.
1130 *
1131 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1132 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1133 * must be 16-bit or 32-bit.
1134 */
1135 /** @todo effective operand size is probably irrelevant here, only the
1136 * call gate bitness matters??
1137 */
1138 VBOXSTRICTRC rcStrict;
1139 RTPTRUNION uPtrRet;
1140 uint64_t uNewRsp;
1141 uint64_t uNewRip;
1142 uint64_t u64Base;
1143 uint32_t cbLimit;
1144 RTSEL uNewCS;
1145 IEMSELDESC DescCS;
1146
1147 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1148 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1149 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1150 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1151
1152 /* Determine the new instruction pointer from the gate descriptor. */
1153 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1154 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1155 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1156
1157 /* Perform DPL checks on the gate descriptor. */
1158 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1159 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1160 {
1161 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1162 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1163 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1164 }
1165
1166 /** @todo does this catch NULL selectors, too? */
1167 if (!pDesc->Legacy.Gen.u1Present)
1168 {
1169 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1170 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1171 }
1172
1173 /*
1174 * Fetch the target CS descriptor from the GDT or LDT.
1175 */
1176 uNewCS = pDesc->Legacy.Gate.u16Sel;
1177 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1178 if (rcStrict != VINF_SUCCESS)
1179 return rcStrict;
1180
1181 /* Target CS must be a code selector. */
1182 if ( !DescCS.Legacy.Gen.u1DescType
1183 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1184 {
1185 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1186 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1187 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1188 }
1189
1190 /* Privilege checks on target CS. */
1191 if (enmBranch == IEMBRANCH_JUMP)
1192 {
1193 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1194 {
1195 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1196 {
1197 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1198 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1199 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1200 }
1201 }
1202 else
1203 {
1204 if (DescCS.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
1205 {
1206 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1207 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1208 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1209 }
1210 }
1211 }
1212 else
1213 {
1214 Assert(enmBranch == IEMBRANCH_CALL);
1215 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1216 {
1217 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1218 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1219 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1220 }
1221 }
1222
1223 /* Additional long mode checks. */
1224 if (IEM_IS_LONG_MODE(pVCpu))
1225 {
1226 if (!DescCS.Legacy.Gen.u1Long)
1227 {
1228 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1229 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1230 }
1231
1232 /* L vs D. */
1233 if ( DescCS.Legacy.Gen.u1Long
1234 && DescCS.Legacy.Gen.u1DefBig)
1235 {
1236 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1237 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1238 }
1239 }
1240
1241 if (!DescCS.Legacy.Gate.u1Present)
1242 {
1243 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1244 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1245 }
1246
1247 if (enmBranch == IEMBRANCH_JUMP)
1248 {
1249 /** @todo This is very similar to regular far jumps; merge! */
1250 /* Jumps are fairly simple... */
1251
1252 /* Chop the high bits off if 16-bit gate (Intel says so). */
1253 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1254 uNewRip = (uint16_t)uNewRip;
1255
1256 /* Limit check for non-long segments. */
1257 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1258 if (DescCS.Legacy.Gen.u1Long)
1259 u64Base = 0;
1260 else
1261 {
1262 if (uNewRip > cbLimit)
1263 {
1264 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1265 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1266 }
1267 u64Base = X86DESC_BASE(&DescCS.Legacy);
1268 }
1269
1270 /* Canonical address check. */
1271 if (!IEM_IS_CANONICAL(uNewRip))
1272 {
1273 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1274 return iemRaiseNotCanonical(pVCpu);
1275 }
1276
1277 /*
1278 * Ok, everything checked out fine. Now set the accessed bit before
1279 * committing the result into CS, CSHID and RIP.
1280 */
1281 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1282 {
1283 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1284 if (rcStrict != VINF_SUCCESS)
1285 return rcStrict;
1286 /** @todo check what VT-x and AMD-V does. */
1287 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1288 }
1289
1290 /* commit */
1291 pVCpu->cpum.GstCtx.rip = uNewRip;
1292 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1293 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
1294 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1295 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1296 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1297 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1298 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1299 }
1300 else
1301 {
1302 Assert(enmBranch == IEMBRANCH_CALL);
1303 /* Calls are much more complicated. */
1304
1305 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < IEM_GET_CPL(pVCpu)))
1306 {
1307 uint16_t offNewStack; /* Offset of new stack in TSS. */
1308 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1309 uint8_t uNewCSDpl;
1310 uint8_t cbWords;
1311 RTSEL uNewSS;
1312 RTSEL uOldSS;
1313 uint64_t uOldRsp;
1314 IEMSELDESC DescSS;
1315 RTPTRUNION uPtrTSS;
1316 RTGCPTR GCPtrTSS;
1317 RTPTRUNION uPtrParmWds;
1318 RTGCPTR GCPtrParmWds;
1319
1320 /* More privilege. This is the fun part. */
1321 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1322
1323 /*
1324 * Determine new SS:rSP from the TSS.
1325 */
1326 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1327
1328 /* Figure out where the new stack pointer is stored in the TSS. */
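 /* Each privilege level has an SS:ESP pair in the 32-bit TSS (8 bytes per level),
 * an SS:SP pair in the 16-bit TSS (4 bytes per level), and only an RSP value in
 * the 64-bit TSS (8 bytes per level). */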
1329 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1330 if (!IEM_IS_LONG_MODE(pVCpu))
1331 {
1332 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1333 {
1334 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1335 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1336 }
1337 else
1338 {
1339 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1340 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1341 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1342 }
1343 }
1344 else
1345 {
1346 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1347 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1348 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1349 }
1350
1351 /* Check against TSS limit. */
1352 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1353 {
1354 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1355 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1356 }
1357
1358 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1359 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
1360 if (rcStrict != VINF_SUCCESS)
1361 {
1362 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1363 return rcStrict;
1364 }
1365
1366 if (!IEM_IS_LONG_MODE(pVCpu))
1367 {
1368 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1369 {
1370 uNewRsp = uPtrTSS.pu32[0];
1371 uNewSS = uPtrTSS.pu16[2];
1372 }
1373 else
1374 {
1375 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1376 uNewRsp = uPtrTSS.pu16[0];
1377 uNewSS = uPtrTSS.pu16[1];
1378 }
1379 }
1380 else
1381 {
1382 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1383 /* SS will be a NULL selector, but that's valid. */
1384 uNewRsp = uPtrTSS.pu64[0];
1385 uNewSS = uNewCSDpl;
1386 }
1387
1388 /* Done with the TSS now. */
1389 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1390 if (rcStrict != VINF_SUCCESS)
1391 {
1392 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1393 return rcStrict;
1394 }
1395
1396 /* Only used outside of long mode. */
1397 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1398
1399 /* If EFER.LMA is 0, there's extra work to do. */
1400 if (!IEM_IS_LONG_MODE(pVCpu))
1401 {
1402 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1403 {
1404 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1405 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1406 }
1407
1408 /* Grab the new SS descriptor. */
1409 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1410 if (rcStrict != VINF_SUCCESS)
1411 return rcStrict;
1412
1413 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1414 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1415 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1416 {
1417 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1418 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1419 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1420 }
1421
1422 /* Ensure new SS is a writable data segment. */
1423 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1424 {
1425 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1426 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1427 }
1428
1429 if (!DescSS.Legacy.Gen.u1Present)
1430 {
1431 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1432 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1433 }
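 /* The frame pushed onto the new stack holds the return CS:(E)IP, the copied
 * parameter (d)words and the caller's SS:(E)SP: four slots plus cbWords entries. */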
1434 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1435 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1436 else
1437 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1438 }
1439 else
1440 {
1441 /* Just grab the new (NULL) SS descriptor. */
1442 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1443 * like we do... */
1444 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1445 if (rcStrict != VINF_SUCCESS)
1446 return rcStrict;
1447
1448 cbNewStack = sizeof(uint64_t) * 4;
1449 }
1450
1451 /** @todo According to Intel, new stack is checked for enough space first,
1452 * then switched. According to AMD, the stack is switched first and
1453 * then pushes might fault!
1454 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1455 * incoming stack \#PF happens before actual stack switch. AMD is
1456 * either lying or implicitly assumes that new state is committed
1457 * only if and when an instruction doesn't fault.
1458 */
1459
1460 /** @todo According to AMD, CS is loaded first, then SS.
1461 * According to Intel, it's the other way around!?
1462 */
1463
1464 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1465
1466 /* Set the accessed bit before committing new SS. */
1467 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1468 {
1469 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1470 if (rcStrict != VINF_SUCCESS)
1471 return rcStrict;
1472 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1473 }
1474
1475 /* Remember the old SS:rSP and their linear address. */
1476 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1477 uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1478
1479 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1480
1481 /* HACK ALERT! Probe whether the write to the new stack will succeed; this may raise #SS(NewSS)
1482 or #PF, and the former is not implemented in this workaround. */
1483 /** @todo Properly fix call gate target stack exceptions. */
1484 /** @todo testcase: Cover callgates with partially or fully inaccessible
1485 * target stacks. */
1486 void *pvNewFrame;
1487 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1488 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1489 if (rcStrict != VINF_SUCCESS)
1490 {
1491 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1492 return rcStrict;
1493 }
1494 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1495 if (rcStrict != VINF_SUCCESS)
1496 {
1497 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1498 return rcStrict;
1499 }
1500
1501 /* Commit new SS:rSP. */
1502 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1503 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1504 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1505 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1506 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1507 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1508 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1509 IEM_SET_CPL(pVCpu, uNewCSDpl); /** @todo Are the parameter words accessed using the new CPL or the old CPL? */
1510 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1511 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1512
1513 /* At this point the stack access must not fail because new state was already committed. */
1514 /** @todo this can still fail because SS.LIMIT is not checked. */
1515 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1516 IEM_IS_LONG_MODE(pVCpu) ? 7
1517 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1518 &uPtrRet.pv, &uNewRsp);
1519 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1520 VERR_INTERNAL_ERROR_5);
1521
1522 if (!IEM_IS_LONG_MODE(pVCpu))
1523 {
1524 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1525 {
1526 if (cbWords)
1527 {
1528 /* Map the relevant chunk of the old stack. */
1529 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1530 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1531 if (rcStrict != VINF_SUCCESS)
1532 {
1533 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1534 return rcStrict;
1535 }
1536
1537 /* Copy the parameter (d)words. */
1538 for (int i = 0; i < cbWords; ++i)
1539 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1540
1541 /* Unmap the old stack. */
1542 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1543 if (rcStrict != VINF_SUCCESS)
1544 {
1545 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1546 return rcStrict;
1547 }
1548 }
1549
1550 /* Push the old CS:rIP. */
1551 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1552 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1553
1554 /* Push the old SS:rSP. */
1555 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1556 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1557 }
1558 else
1559 {
1560 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1561
1562 if (cbWords)
1563 {
1564 /* Map the relevant chunk of the old stack. */
1565 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1566 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1567 if (rcStrict != VINF_SUCCESS)
1568 {
1569 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1570 return rcStrict;
1571 }
1572
1573 /* Copy the parameter words. */
1574 for (int i = 0; i < cbWords; ++i)
1575 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1576
1577 /* Unmap the old stack. */
1578 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1579 if (rcStrict != VINF_SUCCESS)
1580 {
1581                        Log(("BranchCallGate: Old stack unmapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1582 return rcStrict;
1583 }
1584 }
1585
1586 /* Push the old CS:rIP. */
1587 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1588 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1589
1590 /* Push the old SS:rSP. */
1591 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1592 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1593 }
1594 }
1595 else
1596 {
1597 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1598
1599 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1600 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1601 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1602 uPtrRet.pu64[2] = uOldRsp;
1603 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1604 }
1605
1606 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1607 if (rcStrict != VINF_SUCCESS)
1608 {
1609 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1610 return rcStrict;
1611 }
1612
1613 /* Chop the high bits off if 16-bit gate (Intel says so). */
1614 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1615 uNewRip = (uint16_t)uNewRip;
1616
1617 /* Limit / canonical check. */
1618 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1619 if (!IEM_IS_LONG_MODE(pVCpu))
1620 {
1621 if (uNewRip > cbLimit)
1622 {
1623 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1624 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1625 }
1626 u64Base = X86DESC_BASE(&DescCS.Legacy);
1627 }
1628 else
1629 {
1630 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1631 if (!IEM_IS_CANONICAL(uNewRip))
1632 {
1633 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1634 return iemRaiseNotCanonical(pVCpu);
1635 }
1636 u64Base = 0;
1637 }
1638
1639 /*
1640         * Now set the accessed bit before committing the result into
1641         * CS, CSHID and RIP (the return frame has already been pushed and committed above).
1643 */
1644 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1645 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1646 {
1647 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1648 if (rcStrict != VINF_SUCCESS)
1649 return rcStrict;
1650 /** @todo check what VT-x and AMD-V does. */
1651 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1652 }
1653
1654 /* Commit new CS:rIP. */
1655 pVCpu->cpum.GstCtx.rip = uNewRip;
1656 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1657 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1658 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1659 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1660 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1661 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1662 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1663 }
1664 else
1665 {
1666 /* Same privilege. */
1667 /** @todo This is very similar to regular far calls; merge! */
1668
1669 /* Check stack first - may #SS(0). */
1670 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1671 * 16-bit code cause a two or four byte CS to be pushed? */
1672 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1673 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1674 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1675 IEM_IS_LONG_MODE(pVCpu) ? 7
1676                                               : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1677 &uPtrRet.pv, &uNewRsp);
1678 if (rcStrict != VINF_SUCCESS)
1679 return rcStrict;
1680
1681 /* Chop the high bits off if 16-bit gate (Intel says so). */
1682 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1683 uNewRip = (uint16_t)uNewRip;
1684
1685 /* Limit / canonical check. */
1686 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1687 if (!IEM_IS_LONG_MODE(pVCpu))
1688 {
1689 if (uNewRip > cbLimit)
1690 {
1691 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1692 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1693 }
1694 u64Base = X86DESC_BASE(&DescCS.Legacy);
1695 }
1696 else
1697 {
1698 if (!IEM_IS_CANONICAL(uNewRip))
1699 {
1700 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1701 return iemRaiseNotCanonical(pVCpu);
1702 }
1703 u64Base = 0;
1704 }
1705
1706 /*
1707 * Now set the accessed bit before
1708 * writing the return address to the stack and committing the result into
1709 * CS, CSHID and RIP.
1710 */
1711 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1712 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1713 {
1714 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1715 if (rcStrict != VINF_SUCCESS)
1716 return rcStrict;
1717 /** @todo check what VT-x and AMD-V does. */
1718 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1719 }
1720
1721 /* stack */
1722 if (!IEM_IS_LONG_MODE(pVCpu))
1723 {
1724 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1725 {
1726 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1727 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1728 }
1729 else
1730 {
1731 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1732 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1733 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1734 }
1735 }
1736 else
1737 {
1738 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1739 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1740 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1741 }
1742
1743 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1744 if (rcStrict != VINF_SUCCESS)
1745 return rcStrict;
1746
1747 /* commit */
1748 pVCpu->cpum.GstCtx.rip = uNewRip;
1749 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1750 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1751 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1752 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1753 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1754 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1755 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1756 }
1757 }
1758 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1759
1760 iemRecalcExecModeAndCplFlags(pVCpu);
1761
1762/** @todo single stepping */
1763
1764 /* Flush the prefetch buffer. */
1765 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1766 return VINF_SUCCESS;
1767#endif /* IEM_IMPLEMENTS_CALLGATE */
1768}
1769
1770
1771/**
1772 * Implements far jumps and calls thru system selectors.
1773 *
1774 * @returns VBox strict status code.
1775 * @param pVCpu The cross context virtual CPU structure of the
1776 * calling thread.
1777 * @param cbInstr The current instruction length.
1778 * @param uSel The selector.
1779 * @param enmBranch The kind of branching we're performing.
1780 * @param enmEffOpSize The effective operand size.
1781 * @param pDesc The descriptor corresponding to @a uSel.
1782 */
1783static VBOXSTRICTRC iemCImpl_BranchSysSel(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1784 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1785{
1786 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1787 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1788 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1789
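    /* In long mode only the 64-bit call gate type is accepted here; all other system types raise #GP(sel). */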
1790 if (IEM_IS_LONG_MODE(pVCpu))
1791 switch (pDesc->Legacy.Gen.u4Type)
1792 {
1793 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1794 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1795
1796 default:
1797 case AMD64_SEL_TYPE_SYS_LDT:
1798 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1799 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1800 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1801 case AMD64_SEL_TYPE_SYS_INT_GATE:
1802 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1803 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1804 }
1805
1806 switch (pDesc->Legacy.Gen.u4Type)
1807 {
1808 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1809 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1810 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1811
1812 case X86_SEL_TYPE_SYS_TASK_GATE:
1813 return iemCImpl_BranchTaskGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1814
1815 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1816 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1817 return iemCImpl_BranchTaskSegment(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1818
1819 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1820 Log(("branch %04x -> busy 286 TSS\n", uSel));
1821 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1822
1823 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1824 Log(("branch %04x -> busy 386 TSS\n", uSel));
1825 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1826
1827 default:
1828 case X86_SEL_TYPE_SYS_LDT:
1829 case X86_SEL_TYPE_SYS_286_INT_GATE:
1830 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1831 case X86_SEL_TYPE_SYS_386_INT_GATE:
1832 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1833 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1834 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1835 }
1836}
1837
1838
1839/**
1840 * Implements far jumps.
1841 *
1842 * @param uSel The selector.
1843 * @param offSeg The segment offset.
1844 * @param enmEffOpSize The effective operand size.
1845 */
1846IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1847{
1848 NOREF(cbInstr);
1849 Assert(offSeg <= UINT32_MAX || (!IEM_IS_GUEST_CPU_AMD(pVCpu) && IEM_IS_64BIT_CODE(pVCpu)));
1850
1851 /*
1852 * Real mode and V8086 mode are easy. The only snag seems to be that
1853 * CS.limit doesn't change and the limit check is done against the current
1854 * limit.
1855 */
1856 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1857 * 1998) that up to and including the Intel 486, far control
1858 * transfers in real mode set default CS attributes (0x93) and also
1859 * set a 64K segment limit. Starting with the Pentium, the
1860 * attributes and limit are left alone but the access rights are
1861 * ignored. We only implement the Pentium+ behavior.
1862 * */
1863 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1864 {
1865 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1866 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1867 {
1868 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1869 return iemRaiseGeneralProtectionFault0(pVCpu);
1870 }
1871
1872 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1873 pVCpu->cpum.GstCtx.rip = offSeg;
1874 else
1875 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1876 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1877 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1878 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1879 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1880
1881 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
1882 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
1883 { /* likely */ }
1884 else if (uSel != 0)
1885 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
1886 else
1887 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
1888 | iemCalc32BitFlatIndicator(pVCpu);
1889
1890 return iemRegFinishClearingRF(pVCpu);
1891 }
1892
1893 /*
1894 * Protected mode. Need to parse the specified descriptor...
1895 */
1896 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1897 {
1898 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1899 return iemRaiseGeneralProtectionFault0(pVCpu);
1900 }
1901
1902 /* Fetch the descriptor. */
1903 IEMSELDESC Desc;
1904 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1905 if (rcStrict != VINF_SUCCESS)
1906 return rcStrict;
1907
1908 /* Is it there? */
1909 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1910 {
1911 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1912 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1913 }
1914
1915 /*
1916 * Deal with it according to its type. We do the standard code selectors
1917 * here and dispatch the system selectors to worker functions.
1918 */
1919 if (!Desc.Legacy.Gen.u1DescType)
1920 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1921
1922 /* Only code segments. */
1923 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1924 {
1925 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1926 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1927 }
1928
1929 /* L vs D. */
1930 if ( Desc.Legacy.Gen.u1Long
1931 && Desc.Legacy.Gen.u1DefBig
1932 && IEM_IS_LONG_MODE(pVCpu))
1933 {
1934 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1935 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1936 }
1937
1938    /* DPL/RPL/CPL check, where conforming segments make a difference. */
1939 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1940 {
1941 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
1942 {
1943 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1944 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1945 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1946 }
1947 }
1948 else
1949 {
1950 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
1951 {
1952 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1953 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1954 }
1955 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
1956 {
1957 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
1958 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1959 }
1960 }
1961
1962 /* Chop the high bits if 16-bit (Intel says so). */
1963 if (enmEffOpSize == IEMMODE_16BIT)
1964 offSeg &= UINT16_MAX;
1965
1966 /* Limit check and get the base. */
1967 uint64_t u64Base;
1968 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1969 if ( !Desc.Legacy.Gen.u1Long
1970 || !IEM_IS_LONG_MODE(pVCpu))
1971 {
1972 if (RT_LIKELY(offSeg <= cbLimit))
1973 u64Base = X86DESC_BASE(&Desc.Legacy);
1974 else
1975 {
1976 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1977 /** @todo Intel says this is \#GP(0)! */
1978 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1979 }
1980 }
1981 else
1982 u64Base = 0;
1983
1984 /*
1985 * Ok, everything checked out fine. Now set the accessed bit before
1986 * committing the result into CS, CSHID and RIP.
1987 */
1988 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1989 {
1990 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1991 if (rcStrict != VINF_SUCCESS)
1992 return rcStrict;
1993 /** @todo check what VT-x and AMD-V does. */
1994 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1995 }
1996
1997 /* commit */
1998 pVCpu->cpum.GstCtx.rip = offSeg;
1999 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2000 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
2001 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2002 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2003 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2004 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2005 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2006
2007 /** @todo check if the hidden bits are loaded correctly for 64-bit
2008 * mode. */
2009
2010 iemRecalcExecModeAndCplFlags(pVCpu);
2011
2012 /* Flush the prefetch buffer. */
2013 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2014
2015 return iemRegFinishClearingRF(pVCpu);
2016}
2017
2018
2019/**
2020 * Implements far calls.
2021 *
2022 * This is very similar to iemCImpl_FarJmp.
2023 *
2024 * @param uSel The selector.
2025 * @param offSeg The segment offset.
2026 * @param enmEffOpSize The operand size (in case we need it).
2027 */
2028IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
2029{
2030 VBOXSTRICTRC rcStrict;
2031 uint64_t uNewRsp;
2032 RTPTRUNION uPtrRet;
2033
2034 /*
2035 * Real mode and V8086 mode are easy. The only snag seems to be that
2036 * CS.limit doesn't change and the limit check is done against the current
2037 * limit.
2038 */
2039 /** @todo See comment for similar code in iemCImpl_FarJmp */
2040 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2041 {
2042 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2043
2044 /* Check stack first - may #SS(0). */
2045 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2046 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2047 &uPtrRet.pv, &uNewRsp);
2048 if (rcStrict != VINF_SUCCESS)
2049 return rcStrict;
2050
2051 /* Check the target address range. */
2052/** @todo this must be wrong! Write unreal mode tests! */
2053 if (offSeg > UINT32_MAX)
2054 return iemRaiseGeneralProtectionFault0(pVCpu);
2055
2056 /* Everything is fine, push the return address. */
2057 if (enmEffOpSize == IEMMODE_16BIT)
2058 {
2059 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2060 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2061 }
2062 else
2063 {
2064 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2065 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2066 }
2067 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2068 if (rcStrict != VINF_SUCCESS)
2069 return rcStrict;
2070
2071 /* Branch. */
2072 pVCpu->cpum.GstCtx.rip = offSeg;
2073 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2074 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2075 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2076 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2077
2078 return iemRegFinishClearingRF(pVCpu);
2079 }
2080
2081 /*
2082 * Protected mode. Need to parse the specified descriptor...
2083 */
2084 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2085 {
2086 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2087 return iemRaiseGeneralProtectionFault0(pVCpu);
2088 }
2089
2090 /* Fetch the descriptor. */
2091 IEMSELDESC Desc;
2092 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2093 if (rcStrict != VINF_SUCCESS)
2094 return rcStrict;
2095
2096 /*
2097 * Deal with it according to its type. We do the standard code selectors
2098 * here and dispatch the system selectors to worker functions.
2099 */
2100 if (!Desc.Legacy.Gen.u1DescType)
2101 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2102
2103 /* Only code segments. */
2104 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2105 {
2106 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2107 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2108 }
2109
2110 /* L vs D. */
2111 if ( Desc.Legacy.Gen.u1Long
2112 && Desc.Legacy.Gen.u1DefBig
2113 && IEM_IS_LONG_MODE(pVCpu))
2114 {
2115 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2116 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2117 }
2118
2119    /* DPL/RPL/CPL check, where conforming segments make a difference. */
2120 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2121 {
2122 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
2123 {
2124 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2125 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2126 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2127 }
2128 }
2129 else
2130 {
2131 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
2132 {
2133 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2134 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2135 }
2136 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
2137 {
2138 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
2139 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2140 }
2141 }
2142
2143 /* Is it there? */
2144 if (!Desc.Legacy.Gen.u1Present)
2145 {
2146 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2147 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2148 }
2149
2150 /* Check stack first - may #SS(0). */
2151 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2152 * 16-bit code cause a two or four byte CS to be pushed? */
2153 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2154 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2155 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2156 &uPtrRet.pv, &uNewRsp);
2157 if (rcStrict != VINF_SUCCESS)
2158 return rcStrict;
2159
2160 /* Chop the high bits if 16-bit (Intel says so). */
2161 if (enmEffOpSize == IEMMODE_16BIT)
2162 offSeg &= UINT16_MAX;
2163
2164 /* Limit / canonical check. */
2165 uint64_t u64Base;
2166 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2167 if ( !Desc.Legacy.Gen.u1Long
2168 || !IEM_IS_LONG_MODE(pVCpu))
2169 {
2170 if (RT_LIKELY(offSeg <= cbLimit))
2171 u64Base = X86DESC_BASE(&Desc.Legacy);
2172 else
2173 {
2174 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2175 /** @todo Intel says this is \#GP(0)! */
2176 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2177 }
2178 }
2179 else if (IEM_IS_CANONICAL(offSeg))
2180 u64Base = 0;
2181 else
2182 {
2183 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2184 return iemRaiseNotCanonical(pVCpu);
2185 }
2186
2187 /*
2188 * Now set the accessed bit before
2189 * writing the return address to the stack and committing the result into
2190 * CS, CSHID and RIP.
2191 */
2192 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2193 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2194 {
2195 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2196 if (rcStrict != VINF_SUCCESS)
2197 return rcStrict;
2198 /** @todo check what VT-x and AMD-V does. */
2199 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2200 }
2201
2202 /* stack */
2203 if (enmEffOpSize == IEMMODE_16BIT)
2204 {
2205 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2206 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2207 }
2208 else if (enmEffOpSize == IEMMODE_32BIT)
2209 {
2210 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2211 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2212 }
2213 else
2214 {
2215 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2216 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2217 }
2218 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2219 if (rcStrict != VINF_SUCCESS)
2220 return rcStrict;
2221
2222 /* commit */
2223 pVCpu->cpum.GstCtx.rip = offSeg;
2224 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2225 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
2226 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2227 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2228 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2229 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2230 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2231
2232 /** @todo check if the hidden bits are loaded correctly for 64-bit
2233 * mode. */
2234
2235    iemRecalcExecModeAndCplFlags(pVCpu);
2236
2237 /* Flush the prefetch buffer. */
2238 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2239
2240 return iemRegFinishClearingRF(pVCpu);
2241}
2242
2243
2244/**
2245 * Implements retf.
2246 *
2247 * @param enmEffOpSize The effective operand size.
2248 * @param cbPop The amount of arguments to pop from the stack
2249 * (bytes).
2250 */
2251IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2252{
2253 VBOXSTRICTRC rcStrict;
2254 RTCPTRUNION uPtrFrame;
2255 RTUINT64U NewRsp;
2256 uint64_t uNewRip;
2257 uint16_t uNewCs;
2258 NOREF(cbInstr);
2259
2260 /*
2261 * Read the stack values first.
2262 */
2263 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2264 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2265 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2266 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2267 &uPtrFrame.pv, &NewRsp.u);
2268 if (rcStrict != VINF_SUCCESS)
2269 return rcStrict;
2270 if (enmEffOpSize == IEMMODE_16BIT)
2271 {
2272 uNewRip = uPtrFrame.pu16[0];
2273 uNewCs = uPtrFrame.pu16[1];
2274 }
2275 else if (enmEffOpSize == IEMMODE_32BIT)
2276 {
2277 uNewRip = uPtrFrame.pu32[0];
2278 uNewCs = uPtrFrame.pu16[2];
2279 }
2280 else
2281 {
2282 uNewRip = uPtrFrame.pu64[0];
2283 uNewCs = uPtrFrame.pu16[4];
2284 }
2285 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2286 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2287 { /* extremely likely */ }
2288 else
2289 return rcStrict;
2290
2291 /*
2292 * Real mode and V8086 mode are easy.
2293 */
2294 /** @todo See comment for similar code in iemCImpl_FarJmp */
2295 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2296 {
2297 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2298 /** @todo check how this is supposed to work if sp=0xfffe. */
2299
2300 /* Check the limit of the new EIP. */
2301 /** @todo Intel pseudo code only does the limit check for 16-bit
2302 * operands, AMD does not make any distinction. What is right? */
2303 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2304 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2305
2306 /* commit the operation. */
2307 if (cbPop)
2308 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2309 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2310 pVCpu->cpum.GstCtx.rip = uNewRip;
2311 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2312 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2313 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2314 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2315 return iemRegFinishClearingRF(pVCpu);
2316 }
2317
2318 /*
2319 * Protected mode is complicated, of course.
2320 */
2321 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2322 {
2323 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2324 return iemRaiseGeneralProtectionFault0(pVCpu);
2325 }
2326
2327 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2328
2329 /* Fetch the descriptor. */
2330 IEMSELDESC DescCs;
2331 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2332 if (rcStrict != VINF_SUCCESS)
2333 return rcStrict;
2334
2335 /* Can only return to a code selector. */
2336 if ( !DescCs.Legacy.Gen.u1DescType
2337 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2338 {
2339 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2340 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2341 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2342 }
2343
2344 /* L vs D. */
2345 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2346 && DescCs.Legacy.Gen.u1DefBig
2347 && IEM_IS_LONG_MODE(pVCpu))
2348 {
2349 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2350 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2351 }
2352
2353 /* DPL/RPL/CPL checks. */
2354 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
2355 {
2356 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
2357 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2358 }
2359
2360 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2361 {
2362 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2363 {
2364 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2365 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2366 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2367 }
2368 }
2369 else
2370 {
2371 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2372 {
2373 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2374 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2375 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2376 }
2377 }
2378
2379 /* Is it there? */
2380 if (!DescCs.Legacy.Gen.u1Present)
2381 {
2382 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2383 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2384 }
2385
2386 /*
2387 * Return to outer privilege? (We'll typically have entered via a call gate.)
2388 */
2389 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
2390 {
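        /* The inner stack currently holds: new RIP, new CS, cbPop bytes of parameters, and then the outer SS:RSP read below. */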
2391 /* Read the outer stack pointer stored *after* the parameters. */
2392 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, NewRsp.u);
2393 if (rcStrict != VINF_SUCCESS)
2394 return rcStrict;
2395
2396 uint16_t uNewOuterSs;
2397 RTUINT64U NewOuterRsp;
2398 if (enmEffOpSize == IEMMODE_16BIT)
2399 {
2400 NewOuterRsp.u = uPtrFrame.pu16[0];
2401 uNewOuterSs = uPtrFrame.pu16[1];
2402 }
2403 else if (enmEffOpSize == IEMMODE_32BIT)
2404 {
2405 NewOuterRsp.u = uPtrFrame.pu32[0];
2406 uNewOuterSs = uPtrFrame.pu16[2];
2407 }
2408 else
2409 {
2410 NewOuterRsp.u = uPtrFrame.pu64[0];
2411 uNewOuterSs = uPtrFrame.pu16[4];
2412 }
2413 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2414 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2415 { /* extremely likely */ }
2416 else
2417 return rcStrict;
2418
2419 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2420 and read the selector. */
2421 IEMSELDESC DescSs;
2422 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2423 {
2424 if ( !DescCs.Legacy.Gen.u1Long
2425 || (uNewOuterSs & X86_SEL_RPL) == 3)
2426 {
2427 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2428 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2429 return iemRaiseGeneralProtectionFault0(pVCpu);
2430 }
2431 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2432 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2433 }
2434 else
2435 {
2436 /* Fetch the descriptor for the new stack segment. */
2437 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2438 if (rcStrict != VINF_SUCCESS)
2439 return rcStrict;
2440 }
2441
2442 /* Check that RPL of stack and code selectors match. */
2443 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2444 {
2445 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2446 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2447 }
2448
2449 /* Must be a writable data segment. */
2450 if ( !DescSs.Legacy.Gen.u1DescType
2451 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2452 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2453 {
2454 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2455 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2456 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2457 }
2458
2459 /* L vs D. (Not mentioned by intel.) */
2460 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2461 && DescSs.Legacy.Gen.u1DefBig
2462 && IEM_IS_LONG_MODE(pVCpu))
2463 {
2464 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2465 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2466 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2467 }
2468
2469 /* DPL/RPL/CPL checks. */
2470 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2471 {
2472 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2473 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2474 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2475 }
2476
2477 /* Is it there? */
2478 if (!DescSs.Legacy.Gen.u1Present)
2479 {
2480 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2481 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2482 }
2483
2484 /* Calc SS limit.*/
2485 uint64_t u64BaseSs;
2486 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2487
2488 /* Is RIP canonical or within CS.limit? */
2489 uint64_t u64BaseCs;
2490 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2491
2492 /** @todo Testcase: Is this correct? */
2493 if ( DescCs.Legacy.Gen.u1Long
2494 && IEM_IS_LONG_MODE(pVCpu) )
2495 {
2496 if (!IEM_IS_CANONICAL(uNewRip))
2497 {
2498 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2499 return iemRaiseNotCanonical(pVCpu);
2500 }
2501 u64BaseCs = 0;
2502 u64BaseSs = 0;
2503 }
2504 else
2505 {
2506 if (uNewRip > cbLimitCs)
2507 {
2508 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2509 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, cbLimitCs));
2510 /** @todo Intel says this is \#GP(0)! */
2511 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2512 }
2513 u64BaseCs = X86DESC_BASE(&DescCs.Legacy);
2514 u64BaseSs = X86DESC_BASE(&DescSs.Legacy);
2515 }
2516
2517 /*
2518         * Now set the accessed bits before committing the result into
2519         * CS, SS, CSHID, SSHID and RIP.
2521 */
2522 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2523 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2524 {
2525 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2526 if (rcStrict != VINF_SUCCESS)
2527 return rcStrict;
2528 /** @todo check what VT-x and AMD-V does. */
2529 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2530 }
2531 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2532 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2533 {
2534 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2535 if (rcStrict != VINF_SUCCESS)
2536 return rcStrict;
2537 /** @todo check what VT-x and AMD-V does. */
2538 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2539 }
2540
2541 /* commit */
2542 if (enmEffOpSize == IEMMODE_16BIT)
2543 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2544 else
2545 pVCpu->cpum.GstCtx.rip = uNewRip;
2546 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2547 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2548 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2549 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2550 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2551 pVCpu->cpum.GstCtx.cs.u64Base = u64BaseCs;
2552 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2553 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2554 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2555 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2556 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2557 pVCpu->cpum.GstCtx.ss.u64Base = u64BaseSs;
2558
2559 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2560 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2561 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2562 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2563
2564 iemRecalcExecModeAndCplFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
2565
2566 if (cbPop)
2567 iemRegAddToRspEx(pVCpu, &NewOuterRsp, cbPop);
2568 if (IEM_IS_64BIT_CODE(pVCpu))
2569 pVCpu->cpum.GstCtx.rsp = NewOuterRsp.u;
2570 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2571 pVCpu->cpum.GstCtx.rsp = (uint32_t)NewOuterRsp.u;
2572 else
2573 pVCpu->cpum.GstCtx.sp = (uint16_t)NewOuterRsp.u;
2574
2577 /** @todo check if the hidden bits are loaded correctly for 64-bit
2578 * mode. */
2579 }
2580 /*
2581 * Return to the same privilege level
2582 */
2583 else
2584 {
2585 /* Limit / canonical check. */
2586 uint64_t u64Base;
2587 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2588
2589 /** @todo Testcase: Is this correct? */
2590 bool f64BitCs = false;
2591 if ( DescCs.Legacy.Gen.u1Long
2592 && IEM_IS_LONG_MODE(pVCpu) )
2593 {
2594 if (!IEM_IS_CANONICAL(uNewRip))
2595 {
2596 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2597 return iemRaiseNotCanonical(pVCpu);
2598 }
2599 u64Base = 0;
2600            f64BitCs = true;
2602 }
2603 else
2604 {
2605 if (uNewRip > cbLimitCs)
2606 {
2607 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2608 /** @todo Intel says this is \#GP(0)! */
2609 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2610 }
2611 u64Base = X86DESC_BASE(&DescCs.Legacy);
2612 }
2613
2614 /*
2615         * Now set the accessed bit before committing the result into
2616         * CS, CSHID and RIP.
2618 */
2619 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2620 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2621 {
2622 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2623 if (rcStrict != VINF_SUCCESS)
2624 return rcStrict;
2625 /** @todo check what VT-x and AMD-V does. */
2626 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2627 }
2628
2629 /* commit */
2630 if (cbPop)
2631/** @todo This cannot be right. We're using the old CS mode here, and iemRegAddToRspEx checks fExec. */
2632 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2633 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig || f64BitCs)
2634 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2635 else
2636 pVCpu->cpum.GstCtx.sp = (uint16_t)NewRsp.u;
2637 if (enmEffOpSize == IEMMODE_16BIT)
2638 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2639 else
2640 pVCpu->cpum.GstCtx.rip = uNewRip;
2641 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2642 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2643 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2644 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2645 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2646 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2647 /** @todo check if the hidden bits are loaded correctly for 64-bit
2648 * mode. */
2649
2650 iemRecalcExecModeAndCplFlags(pVCpu);
2651 }
2652
2653 /* Flush the prefetch buffer. */
2654 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo use light flush for same privilege? */
2655
2656 return iemRegFinishClearingRF(pVCpu);
2657}
2658
2659
2660/**
2661 * Implements retn and retn imm16.
2662 *
2663 * We're doing this in C because of the \#GP that might be raised if the popped
2664 * program counter is out of bounds.
2665 *
2666 * The hope with this forced inline worker function is that the compiler will
2667 * be clever enough to eliminate unused code for the constant enmEffOpSize and
2668 * maybe cbPop parameters.
2669 *
2670 * @param pVCpu The cross context virtual CPU structure of the
2671 * calling thread.
2672 * @param cbInstr The current instruction length.
2673 * @param enmEffOpSize The effective operand size. This is constant.
2674 * @param cbPop The amount of arguments to pop from the stack
2675 * (bytes). This can be constant (zero).
2676 */
2677DECL_FORCE_INLINE(VBOXSTRICTRC) iemCImpl_ReturnNearCommon(PVMCPUCC pVCpu, uint8_t cbInstr, IEMMODE enmEffOpSize, uint16_t cbPop)
2678{
2679    /* Grab RSP and pop the new RIP from the stack. */
2680 VBOXSTRICTRC rcStrict;
2681 RTUINT64U NewRip;
2682 RTUINT64U NewRsp;
2683 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2684
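    /* Pop the return address; the upper RIP bits are zeroed up front for 16-bit and 32-bit operand sizes. */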
2685 switch (enmEffOpSize)
2686 {
2687 case IEMMODE_16BIT:
2688 NewRip.u = 0;
2689 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2690 break;
2691 case IEMMODE_32BIT:
2692 NewRip.u = 0;
2693 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2694 break;
2695 case IEMMODE_64BIT:
2696 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2697 break;
2698 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2699 }
2700 if (rcStrict != VINF_SUCCESS)
2701 return rcStrict;
2702
2703    /* Check the new RIP before loading it. */
2704 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2705 * of it. The canonical test is performed here and for call. */
2706 if (enmEffOpSize != IEMMODE_64BIT)
2707 {
2708 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))
2709 { /* likely */ }
2710 else
2711 {
2712 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2713 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2714 }
2715 }
2716 else
2717 {
2718 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))
2719 { /* likely */ }
2720 else
2721 {
2722 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2723 return iemRaiseNotCanonical(pVCpu);
2724 }
2725 }
2726
2727 /* Apply cbPop */
2728 if (cbPop)
2729 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2730
2731 /* Commit it. */
2732 pVCpu->cpum.GstCtx.rip = NewRip.u;
2733 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2734
2735 /* Flush the prefetch buffer. */
2736 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo only need a light flush here, don't we? We don't really need any flushing... */
2737 RT_NOREF(cbInstr);
2738
2739 return iemRegFinishClearingRF(pVCpu);
2740}
2741
2742
2743/**
2744 * Implements retn imm16 with 16-bit effective operand size.
2745 *
2746 * @param cbPop The amount of arguments to pop from the stack (bytes).
2747 */
2748IEM_CIMPL_DEF_1(iemCImpl_retn_iw_16, uint16_t, cbPop)
2749{
2750 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, cbPop);
2751}
2752
2753
2754/**
2755 * Implements retn imm16 with 32-bit effective operand size.
2756 *
2757 * @param cbPop The amount of arguments to pop from the stack (bytes).
2758 */
2759IEM_CIMPL_DEF_1(iemCImpl_retn_iw_32, uint16_t, cbPop)
2760{
2761 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, cbPop);
2762}
2763
2764
2765/**
2766 * Implements retn imm16 with 64-bit effective operand size.
2767 *
2768 * @param cbPop The amount of arguments to pop from the stack (bytes).
2769 */
2770IEM_CIMPL_DEF_1(iemCImpl_retn_iw_64, uint16_t, cbPop)
2771{
2772 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, cbPop);
2773}
2774
2775
2776/**
2777 * Implements retn with 16-bit effective operand size.
2778 */
2779IEM_CIMPL_DEF_0(iemCImpl_retn_16)
2780{
2781 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, 0);
2782}
2783
2784
2785/**
2786 * Implements retn with 32-bit effective operand size.
2787 */
2788IEM_CIMPL_DEF_0(iemCImpl_retn_32)
2789{
2790 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, 0);
2791}
2792
2793
2794/**
2795 * Implements retn with 64-bit effective operand size.
2796 */
2797IEM_CIMPL_DEF_0(iemCImpl_retn_64)
2798{
2799 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, 0);
2800}
2801
2802
2803/**
2804 * Implements enter.
2805 *
2806 * We're doing this in C because the instruction is insane; even for the
2807 * u8NestingLevel=0 case, dealing with the stack is tedious.
2808 *
2809 * @param enmEffOpSize The effective operand size.
2810 * @param cbFrame Frame size.
2811 * @param cParameters Frame parameter count.
2812 */
2813IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2814{
2815 /* Push RBP, saving the old value in TmpRbp. */
2816 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2817 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2818 RTUINT64U NewRbp;
2819 VBOXSTRICTRC rcStrict;
2820 if (enmEffOpSize == IEMMODE_64BIT)
2821 {
2822 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2823 NewRbp = NewRsp;
2824 }
2825 else if (enmEffOpSize == IEMMODE_32BIT)
2826 {
2827 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2828 NewRbp = NewRsp;
2829 }
2830 else
2831 {
2832 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2833 NewRbp = TmpRbp;
2834 NewRbp.Words.w0 = NewRsp.Words.w0;
2835 }
2836 if (rcStrict != VINF_SUCCESS)
2837 return rcStrict;
2838
2839 /* Copy the parameters (aka nesting levels by Intel). */
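    /* Only the low 5 bits of the nesting level are used. */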
2840 cParameters &= 0x1f;
2841 if (cParameters > 0)
2842 {
2843 switch (enmEffOpSize)
2844 {
2845 case IEMMODE_16BIT:
2846 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2847 TmpRbp.DWords.dw0 -= 2;
2848 else
2849 TmpRbp.Words.w0 -= 2;
2850 do
2851 {
2852 uint16_t u16Tmp;
2853 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2854 if (rcStrict != VINF_SUCCESS)
2855 break;
2856 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2857 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2858 break;
2859
2860 case IEMMODE_32BIT:
2861 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2862 TmpRbp.DWords.dw0 -= 4;
2863 else
2864 TmpRbp.Words.w0 -= 4;
2865 do
2866 {
2867 uint32_t u32Tmp;
2868 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2869 if (rcStrict != VINF_SUCCESS)
2870 break;
2871 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2872 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2873 break;
2874
2875 case IEMMODE_64BIT:
2876 TmpRbp.u -= 8;
2877 do
2878 {
2879 uint64_t u64Tmp;
2880 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2881 if (rcStrict != VINF_SUCCESS)
2882 break;
2883 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2884 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2885 break;
2886
2887 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2888 }
2889 if (rcStrict != VINF_SUCCESS)
2890            return rcStrict;
2891
2892 /* Push the new RBP */
2893 if (enmEffOpSize == IEMMODE_64BIT)
2894 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2895 else if (enmEffOpSize == IEMMODE_32BIT)
2896 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2897 else
2898 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2899 if (rcStrict != VINF_SUCCESS)
2900 return rcStrict;
2901
2902 }
2903
2904 /* Recalc RSP. */
2905 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2906
2907 /** @todo Should probe write access at the new RSP according to AMD. */
2908 /** @todo Should handle accesses to the VMX APIC-access page. */
2909
2910 /* Commit it. */
2911 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2912 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2913 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2914}
2915
2916
2917
2918/**
2919 * Implements leave.
2920 *
2921 * We're doing this in C because messing with the stack registers is annoying
2922 * since they depend on SS attributes.
2923 *
2924 * @param enmEffOpSize The effective operand size.
2925 */
2926IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2927{
2928 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2929 RTUINT64U NewRsp;
2930 if (IEM_IS_64BIT_CODE(pVCpu))
2931 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2932 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2933 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2934 else
2935 {
2936        /** @todo Check that LEAVE actually preserves the high EBP bits. */
2937 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2938 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2939 }
2940
2941 /* Pop RBP according to the operand size. */
2942 VBOXSTRICTRC rcStrict;
2943 RTUINT64U NewRbp;
2944 switch (enmEffOpSize)
2945 {
2946 case IEMMODE_16BIT:
2947 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2948 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2949 break;
2950 case IEMMODE_32BIT:
2951 NewRbp.u = 0;
2952 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2953 break;
2954 case IEMMODE_64BIT:
2955 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2956 break;
2957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2958 }
2959 if (rcStrict != VINF_SUCCESS)
2960 return rcStrict;
2961
2962
2963 /* Commit it. */
2964 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2965 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2966 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2967}
2968
2969
2970/**
2971 * Implements int3 and int XX.
2972 *
2973 * @param u8Int The interrupt vector number.
2974 * @param enmInt The int instruction type.
2975 */
2976IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2977{
2978 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2979
2980 /*
2981 * We must check if this INT3 might belong to DBGF before raising a #BP.
2982 */
2983 if (u8Int == 3)
2984 {
2985 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2986 if (pVM->dbgf.ro.cEnabledInt3Breakpoints == 0)
2987 { /* likely: No vbox debugger breakpoints */ }
2988 else
2989 {
2990 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx);
2991 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2992 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
2993 return iemSetPassUpStatus(pVCpu, rcStrict);
2994 }
2995 }
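    /* Hand the vector to the common exception/interrupt dispatching code as a software interrupt. */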
2996/** @todo single stepping */
2997 return iemRaiseXcptOrInt(pVCpu,
2998 cbInstr,
2999 u8Int,
3000 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
3001 0,
3002 0);
3003}
3004
3005
3006/**
3007 * Implements iret for real mode and V8086 mode.
3008 *
3009 * @param enmEffOpSize The effective operand size.
3010 */
3011IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
3012{
3013 X86EFLAGS Efl;
3014 Efl.u = IEMMISC_GET_EFL(pVCpu);
3015 NOREF(cbInstr);
3016
3017 /*
3018     * In V8086 mode, iret raises #GP(0) when IOPL isn't 3 and CR4.VME is clear.
3019 */
3020 if ( Efl.Bits.u1VM
3021 && Efl.Bits.u2IOPL != 3
3022 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
3023 return iemRaiseGeneralProtectionFault0(pVCpu);
3024
3025 /*
3026 * Do the stack bits, but don't commit RSP before everything checks
3027 * out right.
3028 */
3029 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3030 VBOXSTRICTRC rcStrict;
3031 RTCPTRUNION uFrame;
3032 uint16_t uNewCs;
3033 uint32_t uNewEip;
3034 uint32_t uNewFlags;
3035 uint64_t uNewRsp;
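    /* The real-mode/V86 IRET frame is IP, CS, FLAGS as words, or EIP, CS, EFLAGS as dwords for a 32-bit operand size. */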
3036 if (enmEffOpSize == IEMMODE_32BIT)
3037 {
3038 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
3039 if (rcStrict != VINF_SUCCESS)
3040 return rcStrict;
3041 uNewEip = uFrame.pu32[0];
3042 if (uNewEip > UINT16_MAX)
3043 return iemRaiseGeneralProtectionFault0(pVCpu);
3044
3045 uNewCs = (uint16_t)uFrame.pu32[1];
3046 uNewFlags = uFrame.pu32[2];
3047 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3048 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
3049 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
3050 | X86_EFL_ID;
3051 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3052 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3053 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
3054 }
3055 else
3056 {
3057 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3058 if (rcStrict != VINF_SUCCESS)
3059 return rcStrict;
3060 uNewEip = uFrame.pu16[0];
3061 uNewCs = uFrame.pu16[1];
3062 uNewFlags = uFrame.pu16[2];
3063 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3064 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
3065 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
3066 /** @todo The intel pseudo code does not indicate what happens to
3067 * reserved flags. We just ignore them. */
3068 /* Ancient CPU adjustments: See iemCImpl_popf. */
3069 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
3070 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
3071 }
3072 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
3073 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3074 { /* extremely likely */ }
3075 else
3076 return rcStrict;
3077
3078 /** @todo Check how this is supposed to work if sp=0xfffe. */
3079 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
3080 uNewCs, uNewEip, uNewFlags, uNewRsp));
3081
3082 /*
3083 * Check the limit of the new EIP.
3084 */
3085    /** @todo Only the AMD pseudo code checks the limit here; what's
3086     *        right? */
3087 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
3088 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3089
3090 /*
3091 * V8086 checks and flag adjustments
3092 */
3093 if (Efl.Bits.u1VM)
3094 {
3095 if (Efl.Bits.u2IOPL == 3)
3096 {
3097 /* Preserve IOPL and clear RF. */
3098 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
3099 uNewFlags |= Efl.u & (X86_EFL_IOPL);
3100 }
3101 else if ( enmEffOpSize == IEMMODE_16BIT
3102 && ( !(uNewFlags & X86_EFL_IF)
3103 || !Efl.Bits.u1VIP )
3104 && !(uNewFlags & X86_EFL_TF) )
3105 {
3106 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
3107 uNewFlags &= ~X86_EFL_VIF;
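            /* IF is bit 9 and VIF is bit 19, hence the shift by 10 below. */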
3108 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
3109 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
3110 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
3111 }
3112 else
3113 return iemRaiseGeneralProtectionFault0(pVCpu);
3114 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
3115 }
3116
3117 /*
3118 * Commit the operation.
3119 */
3120#ifdef DBGFTRACE_ENABLED
3121 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3122 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3123#endif
3124 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3125 pVCpu->cpum.GstCtx.rip = uNewEip;
3126 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3127 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3128 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3129 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
3130 /** @todo do we load attribs and limit as well? */
3131 Assert(uNewFlags & X86_EFL_1);
3132 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3133
3134 /* Flush the prefetch buffer. */
3135 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo can do light flush in real mode at least */
3136
3137/** @todo single stepping */
3138 return VINF_SUCCESS;
3139}
3140
3141
3142/**
3143 * Loads a segment register when entering V8086 mode.
3144 *
3145 * @param pSReg The segment register.
3146 * @param uSeg The segment to load.
3147 */
3148static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3149{
3150 pSReg->Sel = uSeg;
3151 pSReg->ValidSel = uSeg;
3152 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3153 pSReg->u64Base = (uint32_t)uSeg << 4;
3154 pSReg->u32Limit = 0xffff;
3155 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3156 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3157 * IRET'ing to V8086. */
3158}
3159
3160
3161/**
3162 * Implements iret for protected mode returning to V8086 mode.
3163 *
3164 * @param uNewEip The new EIP.
3165 * @param uNewCs The new CS.
3166 * @param uNewFlags The new EFLAGS.
3167 * @param uNewRsp The RSP after the initial IRET frame.
3168 *
3169 * @note    This can only be a 32-bit iret due to the X86_EFL_VM position.
3170 */
3171IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
3172{
3173 RT_NOREF_PV(cbInstr);
3174 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
3175
3176 /*
3177 * Pop the V8086 specific frame bits off the stack.
3178 */
3179 VBOXSTRICTRC rcStrict;
3180 RTCPTRUNION uFrame;
3181 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);
3182 if (rcStrict != VINF_SUCCESS)
3183 return rcStrict;
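    /* The remainder of the 32-bit IRET-to-V86 frame: ESP, SS, ES, DS, FS, GS, each pushed as a dword (only the low 16 bits of the selectors are used). */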
3184 uint32_t uNewEsp = uFrame.pu32[0];
3185 uint16_t uNewSs = uFrame.pu32[1];
3186 uint16_t uNewEs = uFrame.pu32[2];
3187 uint16_t uNewDs = uFrame.pu32[3];
3188 uint16_t uNewFs = uFrame.pu32[4];
3189 uint16_t uNewGs = uFrame.pu32[5];
3190 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3191 if (rcStrict != VINF_SUCCESS)
3192 return rcStrict;
3193
3194 /*
3195 * Commit the operation.
3196 */
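    /* Keep only the architecturally defined EFLAGS bits and force the reserved bit 1 to one. */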
3197 uNewFlags &= X86_EFL_LIVE_MASK;
3198 uNewFlags |= X86_EFL_RA1_MASK;
3199#ifdef DBGFTRACE_ENABLED
3200 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3201 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3202#endif
3203 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3204
3205 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3206 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3207 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3208 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3209 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3210 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3211 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3212 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3213 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3214 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
3215 | (3 << IEM_F_X86_CPL_SHIFT)
3216 | IEM_F_MODE_X86_16BIT_PROT_V86;
3217
3218 /* Flush the prefetch buffer. */
3219 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3220
3221/** @todo single stepping */
3222 return VINF_SUCCESS;
3223}
3224
3225
3226/**
3227 * Implements iret for protected mode returning via a nested task.
3228 *
3229 * @param enmEffOpSize The effective operand size.
3230 */
3231IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3232{
3233 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3234#ifndef IEM_IMPLEMENTS_TASKSWITCH
3235 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3236#else
3237 RT_NOREF_PV(enmEffOpSize);
3238
3239 /*
3240 * Read the segment selector in the link-field of the current TSS.
3241 */
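    /* The previous-task back-link is stored in the first word (offset 0) of
       both the 286 and 386 TSS layouts, hence the fetch from the TSS base. */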
3242 RTSEL uSelRet;
3243 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3244 if (rcStrict != VINF_SUCCESS)
3245 return rcStrict;
3246
3247 /*
3248 * Fetch the returning task's TSS descriptor from the GDT.
3249 */
3250 if (uSelRet & X86_SEL_LDT)
3251 {
3252        Log(("iret_prot_NestedTask TSS selector is in the LDT (must be in the GDT). uSelRet=%04x -> #TS\n", uSelRet));
3253 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3254 }
3255
3256 IEMSELDESC TssDesc;
3257 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3258 if (rcStrict != VINF_SUCCESS)
3259 return rcStrict;
3260
3261 if (TssDesc.Legacy.Gate.u1DescType)
3262 {
3263 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3264 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3265 }
3266
3267 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3268 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3269 {
3270 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3271 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3272 }
3273
3274 if (!TssDesc.Legacy.Gate.u1Present)
3275 {
3276 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3277 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3278 }
3279
3280 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3281 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3282 0 /* uCr2 */, uSelRet, &TssDesc);
3283#endif
3284}
3285
3286
3287/**
3288 * Implements iret for protected mode.
3289 *
3290 * @param enmEffOpSize The effective operand size.
3291 */
3292IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3293{
3294 NOREF(cbInstr);
3295 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3296
3297 /*
3298 * Nested task return.
3299 */
3300 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3301 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3302
3303 /*
3304 * Normal return.
3305 *
3306 * Do the stack bits, but don't commit RSP before everything checks
3307 * out right.
3308 */
3309 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3310 VBOXSTRICTRC rcStrict;
3311 RTCPTRUNION uFrame;
3312 uint16_t uNewCs;
3313 uint32_t uNewEip;
3314 uint32_t uNewFlags;
3315 uint64_t uNewRsp;
3316 if (enmEffOpSize == IEMMODE_32BIT)
3317 {
3318 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
3319 if (rcStrict != VINF_SUCCESS)
3320 return rcStrict;
3321 uNewEip = uFrame.pu32[0];
3322 uNewCs = (uint16_t)uFrame.pu32[1];
3323 uNewFlags = uFrame.pu32[2];
3324 }
3325 else
3326 {
3327 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3328 if (rcStrict != VINF_SUCCESS)
3329 return rcStrict;
3330 uNewEip = uFrame.pu16[0];
3331 uNewCs = uFrame.pu16[1];
3332 uNewFlags = uFrame.pu16[2];
3333 }
3334 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3335 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3336 { /* extremely likely */ }
3337 else
3338 return rcStrict;
3339 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, IEM_GET_CPL(pVCpu)));
3340
3341 /*
3342 * We're hopefully not returning to V8086 mode...
3343 */
3344 if ( (uNewFlags & X86_EFL_VM)
3345 && IEM_GET_CPL(pVCpu) == 0)
3346 {
3347 Assert(enmEffOpSize == IEMMODE_32BIT);
3348 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3349 }
3350
3351 /*
3352 * Protected mode.
3353 */
3354 /* Read the CS descriptor. */
3355 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3356 {
3357 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3358 return iemRaiseGeneralProtectionFault0(pVCpu);
3359 }
3360
3361 IEMSELDESC DescCS;
3362 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3363 if (rcStrict != VINF_SUCCESS)
3364 {
3365 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3366 return rcStrict;
3367 }
3368
3369 /* Must be a code descriptor. */
3370 if (!DescCS.Legacy.Gen.u1DescType)
3371 {
3372 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3373 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3374 }
3375 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3376 {
3377 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3378 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3379 }
3380
3381 /* Privilege checks. */
3382 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3383 {
3384 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3385 {
3386 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3387 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3388 }
3389 }
3390 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3391 {
3392 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3393 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3394 }
3395 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3396 {
3397 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, IEM_GET_CPL(pVCpu)));
3398 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3399 }
3400
3401 /* Present? */
3402 if (!DescCS.Legacy.Gen.u1Present)
3403 {
3404 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3405 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3406 }
3407
3408 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3409
3410 /*
3411 * Return to outer level?
3412 */
3413 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
3414 {
3415 uint16_t uNewSS;
3416 uint32_t uNewESP;
3417 if (enmEffOpSize == IEMMODE_32BIT)
3418 {
3419 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
3422/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3423 * 16 or 32 bits get loaded into SP turns out to depend on the D/B
3424 * bit of the popped SS selector. */
3425 uNewESP = uFrame.pu32[0];
3426 uNewSS = (uint16_t)uFrame.pu32[1];
3427 }
3428 else
3429 {
3430 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);
3431 if (rcStrict != VINF_SUCCESS)
3432 return rcStrict;
3433 uNewESP = uFrame.pu16[0];
3434 uNewSS = uFrame.pu16[1];
3435 }
3436 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3437 if (rcStrict != VINF_SUCCESS)
3438 return rcStrict;
3439 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3440
3441 /* Read the SS descriptor. */
3442 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3443 {
3444 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3445 return iemRaiseGeneralProtectionFault0(pVCpu);
3446 }
3447
3448 IEMSELDESC DescSS;
3449 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3450 if (rcStrict != VINF_SUCCESS)
3451 {
3452 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3453 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3454 return rcStrict;
3455 }
3456
3457 /* Privilege checks. */
3458 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3459 {
3460 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3461 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3462 }
3463 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3464 {
3465 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3466 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3467 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3468 }
3469
3470 /* Must be a writeable data segment descriptor. */
3471 if (!DescSS.Legacy.Gen.u1DescType)
3472 {
3473 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3474 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3475 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3476 }
3477 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3478 {
3479 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3480 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3481 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3482 }
3483
3484 /* Present? */
3485 if (!DescSS.Legacy.Gen.u1Present)
3486 {
3487 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3488 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3489 }
3490
3491 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3492
3493 /* Check EIP. */
3494 if (uNewEip > cbLimitCS)
3495 {
3496 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3497 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3498 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3499 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3500 }
3501
3502 /*
3503 * Commit the changes, marking CS and SS accessed first since
3504 * that may fail.
3505 */
3506 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3507 {
3508 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3509 if (rcStrict != VINF_SUCCESS)
3510 return rcStrict;
3511 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3512 }
3513 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3514 {
3515 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3516 if (rcStrict != VINF_SUCCESS)
3517 return rcStrict;
3518 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3519 }
3520
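        /* Which popped flags may be restored: the arithmetic flags, TF, DF and
           NT always; RF/AC/ID only with a 32-bit operand size; IF/IOPL/VIF/VIP
           only at CPL 0, or just IF when CPL <= IOPL; AC/ID/VIF/VIP never on
           386 or older target CPUs. */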
3521 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3522 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3523 if (enmEffOpSize != IEMMODE_16BIT)
3524 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3525 if (IEM_GET_CPL(pVCpu) == 0)
3526 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3527 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3528 fEFlagsMask |= X86_EFL_IF;
3529 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3530 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3531 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3532 fEFlagsNew &= ~fEFlagsMask;
3533 fEFlagsNew |= uNewFlags & fEFlagsMask;
3534#ifdef DBGFTRACE_ENABLED
3535 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3536 IEM_GET_CPL(pVCpu), uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3537 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3538#endif
3539
3540 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3541 pVCpu->cpum.GstCtx.rip = uNewEip;
3542 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3543 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3544 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3545 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3546 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3547 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3548
3549 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3550 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3551 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3552 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3553 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3554 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3555 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3556 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3557 else
3558 pVCpu->cpum.GstCtx.rsp = uNewESP;
3559
3560 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3561 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3562 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3563 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3564
3565 iemRecalcExecModeAndCplFlags(pVCpu);
3566
3567 /* Done! */
3568
3569 }
3570 /*
3571 * Return to the same level.
3572 */
3573 else
3574 {
3575 /* Check EIP. */
3576 if (uNewEip > cbLimitCS)
3577 {
3578 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3579 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3580 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3581 }
3582
3583 /*
3584 * Commit the changes, marking CS first since it may fail.
3585 */
3586 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3587 {
3588 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3589 if (rcStrict != VINF_SUCCESS)
3590 return rcStrict;
3591 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3592 }
3593
3594 X86EFLAGS NewEfl;
3595 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3596 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3597 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3598 if (enmEffOpSize != IEMMODE_16BIT)
3599 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3600 if (IEM_GET_CPL(pVCpu) == 0)
3601 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3602 else if (IEM_GET_CPL(pVCpu) <= NewEfl.Bits.u2IOPL)
3603 fEFlagsMask |= X86_EFL_IF;
3604 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3605 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3606 NewEfl.u &= ~fEFlagsMask;
3607 NewEfl.u |= fEFlagsMask & uNewFlags;
3608#ifdef DBGFTRACE_ENABLED
3609 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3610 IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3611 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3612#endif
3613
3614 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3615 pVCpu->cpum.GstCtx.rip = uNewEip;
3616 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3617 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3618 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3619 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3620 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3621 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3622 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3623 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3624 else
3625 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3626
3627 iemRecalcExecModeAndCplFlags(pVCpu);
3628
3629 /* Done! */
3630 }
3631
3632 /* Flush the prefetch buffer. */
3633 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if same ring? */
3634
3635/** @todo single stepping */
3636 return VINF_SUCCESS;
3637}
3638
3639
3640/**
3641 * Implements iret for long mode.
3642 *
3643 * @param enmEffOpSize The effective operand size.
3644 */
3645IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3646{
3647 NOREF(cbInstr);
3648
3649 /*
3650 * Nested task return is not supported in long mode.
3651 */
3652 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3653 {
3654 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3655 return iemRaiseGeneralProtectionFault0(pVCpu);
3656 }
3657
3658 /*
3659 * Normal return.
3660 *
3661 * Do the stack bits, but don't commit RSP before everything checks
3662 * out right.
3663 */
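    /* The long mode IRET frame always has five slots (RIP, CS, RFLAGS, RSP
       and SS), each scaled by the effective operand size (8, 4 or 2 bytes). */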
3664 VBOXSTRICTRC rcStrict;
3665 RTCPTRUNION uFrame;
3666 uint64_t uNewRip;
3667 uint16_t uNewCs;
3668 uint16_t uNewSs;
3669 uint32_t uNewFlags;
3670 uint64_t uNewRsp;
3671 if (enmEffOpSize == IEMMODE_64BIT)
3672 {
3673 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
3674 if (rcStrict != VINF_SUCCESS)
3675 return rcStrict;
3676 uNewRip = uFrame.pu64[0];
3677 uNewCs = (uint16_t)uFrame.pu64[1];
3678 uNewFlags = (uint32_t)uFrame.pu64[2];
3679 uNewRsp = uFrame.pu64[3];
3680 uNewSs = (uint16_t)uFrame.pu64[4];
3681 }
3682 else if (enmEffOpSize == IEMMODE_32BIT)
3683 {
3684 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
3685 if (rcStrict != VINF_SUCCESS)
3686 return rcStrict;
3687 uNewRip = uFrame.pu32[0];
3688 uNewCs = (uint16_t)uFrame.pu32[1];
3689 uNewFlags = uFrame.pu32[2];
3690 uNewRsp = uFrame.pu32[3];
3691 uNewSs = (uint16_t)uFrame.pu32[4];
3692 }
3693 else
3694 {
3695 Assert(enmEffOpSize == IEMMODE_16BIT);
3696 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
3697 if (rcStrict != VINF_SUCCESS)
3698 return rcStrict;
3699 uNewRip = uFrame.pu16[0];
3700 uNewCs = uFrame.pu16[1];
3701 uNewFlags = uFrame.pu16[2];
3702 uNewRsp = uFrame.pu16[3];
3703 uNewSs = uFrame.pu16[4];
3704 }
3705 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3706 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3707    { /* extremely likely */ }
3708 else
3709 return rcStrict;
3710 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3711
3712 /*
3713 * Check stuff.
3714 */
3715 /* Read the CS descriptor. */
3716 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3717 {
3718 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3719 return iemRaiseGeneralProtectionFault0(pVCpu);
3720 }
3721
3722 IEMSELDESC DescCS;
3723 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3724 if (rcStrict != VINF_SUCCESS)
3725 {
3726 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3727 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3728 return rcStrict;
3729 }
3730
3731 /* Must be a code descriptor. */
3732 if ( !DescCS.Legacy.Gen.u1DescType
3733 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3734 {
3735        Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#x -> #GP\n",
3736 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3737 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3738 }
3739
3740 /* Privilege checks. */
3741 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3742 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3743 {
3744 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3745 {
3746 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3747 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3748 }
3749 }
3750 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3751 {
3752 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3753 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3754 }
3755 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3756 {
3757 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
3758 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3759 }
3760
3761 /* Present? */
3762 if (!DescCS.Legacy.Gen.u1Present)
3763 {
3764 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3765 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3766 }
3767
3768 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3769
3770 /* Read the SS descriptor. */
3771 IEMSELDESC DescSS;
3772 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3773 {
3774 if ( !DescCS.Legacy.Gen.u1Long
3775 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3776 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3777 {
3778 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3779 return iemRaiseGeneralProtectionFault0(pVCpu);
3780 }
3781 /* Make sure SS is sensible, marked as accessed etc. */
3782 iemMemFakeStackSelDesc(&DescSS, (uNewSs & X86_SEL_RPL));
3783 }
3784 else
3785 {
3786 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3787 if (rcStrict != VINF_SUCCESS)
3788 {
3789 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3790 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3791 return rcStrict;
3792 }
3793 }
3794
3795 /* Privilege checks. */
3796 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3797 {
3798 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3799 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3800 }
3801
3802 uint32_t cbLimitSs;
3803 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3804 cbLimitSs = UINT32_MAX;
3805 else
3806 {
3807 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3808 {
3809 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3810 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3811 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3812 }
3813
3814 /* Must be a writeable data segment descriptor. */
3815 if (!DescSS.Legacy.Gen.u1DescType)
3816 {
3817 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3818 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3819 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3820 }
3821 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3822 {
3823 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3824 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3825 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3826 }
3827
3828 /* Present? */
3829 if (!DescSS.Legacy.Gen.u1Present)
3830 {
3831 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3832 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3833 }
3834 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3835 }
3836
3837 /* Check EIP. */
3838 if (DescCS.Legacy.Gen.u1Long)
3839 {
3840 if (!IEM_IS_CANONICAL(uNewRip))
3841 {
3842 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3843 uNewCs, uNewRip, uNewSs, uNewRsp));
3844 return iemRaiseNotCanonical(pVCpu);
3845 }
3846/** @todo check the location of this... Testcase. */
3847 if (RT_LIKELY(!DescCS.Legacy.Gen.u1DefBig))
3848 { /* likely */ }
3849 else
3850 {
3851 Log(("iret %04x:%016RX64/%04x:%016RX64 -> both L and D are set -> #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3852 return iemRaiseGeneralProtectionFault0(pVCpu);
3853 }
3854 }
3855 else
3856 {
3857 if (uNewRip > cbLimitCS)
3858 {
3859 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3860 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3861 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3862 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3863 }
3864 }
3865
3866 /*
3867 * Commit the changes, marking CS and SS accessed first since
3868 * that may fail.
3869 */
3870 /** @todo where exactly are these actually marked accessed by a real CPU? */
3871 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3872 {
3873 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3874 if (rcStrict != VINF_SUCCESS)
3875 return rcStrict;
3876 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3877 }
3878 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3879 {
3880 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3881 if (rcStrict != VINF_SUCCESS)
3882 return rcStrict;
3883 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3884 }
3885
3886 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3887 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3888 if (enmEffOpSize != IEMMODE_16BIT)
3889 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3890 if (IEM_GET_CPL(pVCpu) == 0)
3891 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3892 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3893 fEFlagsMask |= X86_EFL_IF;
3894 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3895 fEFlagsNew &= ~fEFlagsMask;
3896 fEFlagsNew |= uNewFlags & fEFlagsMask;
3897#ifdef DBGFTRACE_ENABLED
3898 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3899 IEM_GET_CPL(pVCpu), uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3900#endif
3901
3902 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3903 pVCpu->cpum.GstCtx.rip = uNewRip;
3904 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3905 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3906 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3907 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3908 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3909 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3910 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3911 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3912 else
3913 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3914 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3915 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3916 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3917 {
3918 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3919 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3920 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3921 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3922 Log2(("iretq new SS: NULL\n"));
3923 }
3924 else
3925 {
3926 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3927 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3928 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3929 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3930 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3931 }
3932
3933 if (IEM_GET_CPL(pVCpu) != uNewCpl)
3934 {
3935 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3936 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3937 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3938 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3939 }
3940
3941 iemRecalcExecModeAndCplFlags(pVCpu);
3942
3943 /* Flush the prefetch buffer. */
3944 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if the ring + mode doesn't change */
3945
3946/** @todo single stepping */
3947 return VINF_SUCCESS;
3948}
3949
3950
3951/**
3952 * Implements iret.
3953 *
3954 * @param enmEffOpSize The effective operand size.
3955 */
3956IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3957{
3958 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
3959
3960#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3961 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3962 {
3963 /*
3964 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3965 * of this IRET instruction. We need to provide this information as part of some
3966 * VM-exits.
3967 *
3968 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3969 */
3970 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3971 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3972 else
3973 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3974
3975 /*
3976 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3977 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3978 */
3979 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3980 fBlockingNmi = false;
3981
3982 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3983 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3984 }
3985#endif
3986
3987 /*
3988     * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3989 * The NMI is still held pending (which I assume means blocking of further NMIs
3990 * is in effect).
3991 *
3992 * See AMD spec. 15.9 "Instruction Intercepts".
3993 * See AMD spec. 15.21.9 "NMI Support".
3994 */
3995 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3996 {
3997 Log(("iret: Guest intercept -> #VMEXIT\n"));
3998 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
3999 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4000 }
4001
4002 /*
4003 * Clear NMI blocking, if any, before causing any further exceptions.
4004 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
4005 */
4006 if (fBlockingNmi)
4007 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4008
4009 /*
4010 * Call a mode specific worker.
4011 */
4012 VBOXSTRICTRC rcStrict;
4013 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4014 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
4015 else
4016 {
4017 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
4018 if (IEM_IS_64BIT_CODE(pVCpu))
4019 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
4020 else
4021 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
4022 }
4023
4024#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4025 /*
4026 * Clear NMI unblocking IRET state with the completion of IRET.
4027 */
4028 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4029 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = false;
4030#endif
4031 return rcStrict;
4032}
4033
4034
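/**
 * Worker for LOADALL: loads the visible selector part of a segment register
 * without touching the hidden base, limit or attribute state.
 */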
4035static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4036{
4037 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4038
4039 pHid->Sel = uSel;
4040 pHid->ValidSel = uSel;
4041 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4042}
4043
4044
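/**
 * Worker for 286 LOADALL: loads a 6-byte descriptor cache entry (24-bit base,
 * attribute byte, 16-bit limit) into the given segment register.
 */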
4045static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
4046{
4047 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4048
4049 /* The base is in the first three bytes. */
4050 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
4051 /* The attributes are in the fourth byte. */
4052 pHid->Attr.u = pbMem[3];
4053 pHid->Attr.u &= ~(X86DESCATTR_L | X86DESCATTR_D); /* (just to be on the safe side) */
4054 /* The limit is in the last two bytes. */
4055 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
4056}
4057
4058
4059/**
4060 * Implements 286 LOADALL (286 CPUs only).
4061 */
4062IEM_CIMPL_DEF_0(iemCImpl_loadall286)
4063{
4064 NOREF(cbInstr);
4065
4066 /* Data is loaded from a buffer at 800h. No checks are done on the
4067 * validity of loaded state.
4068 *
4069 * LOADALL only loads the internal CPU state, it does not access any
4070 * GDT, LDT, or similar tables.
4071 */
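    /* Buffer offsets used below: 0x06 MSW, 0x16 TR, 0x18 FLAGS, 0x1A IP,
       0x1C LDTR, 0x1E DS/SS/CS/ES selectors, 0x26 GPRs, 0x36 descriptor
       caches (ES, CS, SS, DS; 6 bytes each), 0x4E GDTR, 0x5A IDTR. */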
4072
4073 if (IEM_GET_CPL(pVCpu) != 0)
4074 {
4075 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4076 return iemRaiseGeneralProtectionFault0(pVCpu);
4077 }
4078
4079 uint8_t const *pbMem = NULL;
4080 uint16_t const *pa16Mem;
4081 uint8_t const *pa8Mem;
4082 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
4083 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
4084 if (rcStrict != VINF_SUCCESS)
4085 return rcStrict;
4086
4087 /* The MSW is at offset 0x06. */
4088 pa16Mem = (uint16_t const *)(pbMem + 0x06);
4089 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
4090 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
4091 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
4092 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
4093
4094 CPUMSetGuestCR0(pVCpu, uNewCr0);
4095 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
4096
4097 /* Inform PGM if mode changed. */
4098 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
4099 {
4100 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
4101 AssertRCReturn(rc, rc);
4102 /* ignore informational status codes */
4103 }
4104 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
4105 false /* fForce */);
4106
4107 /* TR selector is at offset 0x16. */
4108 pa16Mem = (uint16_t const *)(pbMem + 0x16);
4109 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0];
4110 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
4111 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4112
4113 /* Followed by FLAGS... */
4114 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
4115 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */
4116
4117 /* LDT is at offset 0x1C. */
4118 pa16Mem = (uint16_t const *)(pbMem + 0x1C);
4119 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0];
4120 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
4121 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4122
4123 /* Segment registers are at offset 0x1E. */
4124 pa16Mem = (uint16_t const *)(pbMem + 0x1E);
4125 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
4126 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
4127 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
4128 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
4129
4130 /* GPRs are at offset 0x26. */
4131 pa16Mem = (uint16_t const *)(pbMem + 0x26);
4132 pVCpu->cpum.GstCtx.di = pa16Mem[0];
4133 pVCpu->cpum.GstCtx.si = pa16Mem[1];
4134 pVCpu->cpum.GstCtx.bp = pa16Mem[2];
4135 pVCpu->cpum.GstCtx.sp = pa16Mem[3];
4136 pVCpu->cpum.GstCtx.bx = pa16Mem[4];
4137 pVCpu->cpum.GstCtx.dx = pa16Mem[5];
4138 pVCpu->cpum.GstCtx.cx = pa16Mem[6];
4139 pVCpu->cpum.GstCtx.ax = pa16Mem[7];
4140
4141 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
4142 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
4143 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
4144 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
4145 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
4146
4147 /* GDTR contents are at offset 0x4E, 6 bytes. */
4148 RTGCPHYS GCPtrBase;
4149 uint16_t cbLimit;
4150 pa8Mem = pbMem + 0x4E;
4151 /* NB: Fourth byte "should be zero"; we are ignoring it. */
4152 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4153 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4154 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4155
4156 /* IDTR contents are at offset 0x5A, 6 bytes. */
4157 pa8Mem = pbMem + 0x5A;
4158 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4159 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4160 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4161
4162 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
4163 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
4164 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
4165 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
4166 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
4167    Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
4168
4169 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
4170 if (rcStrict != VINF_SUCCESS)
4171 return rcStrict;
4172
4173 /*
4174     * The CPL may change and protected mode may become enabled. It is taken
4175 * from the "DPL fields of the SS and CS descriptor caches" but there is no
4176 * word as to what happens if those are not identical (probably bad things).
4177 */
4178 iemRecalcExecModeAndCplFlags(pVCpu);
4179 Assert(IEM_IS_16BIT_CODE(pVCpu));
4180
4181 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
4182
4183 /* Flush the prefetch buffer. */
4184 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4185
4186/** @todo single stepping */
4187 return rcStrict;
4188}
4189
4190
4191/**
4192 * Implements SYSCALL (AMD and Intel64).
4193 */
4194IEM_CIMPL_DEF_0(iemCImpl_syscall)
4195{
4196 /** @todo hack, LOADALL should be decoded as such on a 286. */
4197 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
4198 return iemCImpl_loadall286(pVCpu, cbInstr);
4199
4200 /*
4201 * Check preconditions.
4202 *
4203     * Note that CPUs described in the documentation may load a few odder values
4204 * into CS and SS than we allow here. This has yet to be checked on real
4205 * hardware.
4206 */
4207 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4208 {
4209 Log(("syscall: Not enabled in EFER -> #UD\n"));
4210 return iemRaiseUndefinedOpcode(pVCpu);
4211 }
4212 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4213 {
4214 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4215 return iemRaiseGeneralProtectionFault0(pVCpu);
4216 }
4217 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4218 {
4219 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4220 return iemRaiseUndefinedOpcode(pVCpu);
4221 }
4222
4223 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4224
4225 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4226 /** @todo what about LDT selectors? Shouldn't matter, really. */
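    /* The SYSCALL CS selector comes from STAR[47:32] (RPL bits masked off),
       and SS is architecturally defined as that selector + 8. */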
4227 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4228 uint16_t uNewSs = uNewCs + 8;
4229 if (uNewCs == 0 || uNewSs == 0)
4230 {
4231 /** @todo Neither Intel nor AMD document this check. */
4232 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4233 return iemRaiseGeneralProtectionFault0(pVCpu);
4234 }
4235
4236 /* Long mode and legacy mode differs. */
4237 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4238 {
4239        uint64_t uNewRip = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx.msrCSTAR;
4240
4241 /* This test isn't in the docs, but I'm not trusting the guys writing
4242 the MSRs to have validated the values as canonical like they should. */
4243 if (!IEM_IS_CANONICAL(uNewRip))
4244 {
4245 /** @todo Intel claims this can't happen because IA32_LSTAR MSR can't be written with non-canonical address. */
4246 Log(("syscall: New RIP not canonical -> #UD\n"));
4247 return iemRaiseUndefinedOpcode(pVCpu);
4248 }
4249
4250 /*
4251 * Commit it.
4252 */
4253 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4254 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4255 pVCpu->cpum.GstCtx.rip = uNewRip;
4256
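        /* R11 receives the pre-SYSCALL RFLAGS (with RF cleared), while the new
           RFLAGS has the IA32_FMASK (msrSFMASK) bits cleared and bit 1 set. */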
4257 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4258 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4259 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4260 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4261
4262 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4263 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4264
4265 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4266 | IEM_F_MODE_X86_64BIT;
4267 }
4268 else
4269 {
4270 /*
4271 * Commit it.
4272 */
4273 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4274 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4275 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4276 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4277
4278 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4279 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4280
4281 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4282 | IEM_F_MODE_X86_32BIT_PROT
4283 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4284 }
4285 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4286 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4287 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4288 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4289 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4290
4291 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4292 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4293 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4294 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4295 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4296
4297 /* Flush the prefetch buffer. */
4298 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4299
4300/** @todo single step */
4301 return VINF_SUCCESS;
4302}
4303
4304
4305/**
4306 * Implements SYSRET (AMD and Intel64).
4307 *
4308 * @param enmEffOpSize The effective operand size.
4309 */
4310IEM_CIMPL_DEF_1(iemCImpl_sysret, IEMMODE, enmEffOpSize)
4311
4312{
4313 RT_NOREF_PV(cbInstr);
4314
4315 /*
4316 * Check preconditions.
4317 *
4318     * Note that CPUs described in the documentation may load a few odder values
4319 * into CS and SS than we allow here. This has yet to be checked on real
4320 * hardware.
4321 */
4322 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4323 {
4324 Log(("sysret: Not enabled in EFER -> #UD\n"));
4325 return iemRaiseUndefinedOpcode(pVCpu);
4326 }
4327 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4328 {
4329 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4330 return iemRaiseUndefinedOpcode(pVCpu);
4331 }
4332 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4333 {
4334 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4335 return iemRaiseGeneralProtectionFault0(pVCpu);
4336 }
4337 if (IEM_GET_CPL(pVCpu) != 0)
4338 {
4339 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4340 return iemRaiseGeneralProtectionFault0(pVCpu);
4341 }
4342
4343 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4344
4345 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
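    /* SYSRET CS/SS selectors come from STAR[63:48]: SS is always that value
       + 8, while a 64-bit return uses + 16 for CS (the 32-bit CS sits at + 0).
       The RPL is forced to 3 when the registers are committed below. */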
4346 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4347 uint16_t uNewSs = uNewCs + 8;
4348 if (enmEffOpSize == IEMMODE_64BIT)
4349 uNewCs += 16;
4350 if (uNewCs == 0 || uNewSs == 0)
4351 {
4352 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4353 return iemRaiseGeneralProtectionFault0(pVCpu);
4354 }
4355
4356 /*
4357 * Commit it.
4358 */
4359 bool f32Bit = true;
4360 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4361 {
4362 if (enmEffOpSize == IEMMODE_64BIT)
4363 {
4364 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4365            /* Note! We disregard the Intel manual regarding the RCX canonical
4366               check; ask Intel+Xen why AMD doesn't do it. */
4367 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4368 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4369 | (3 << X86DESCATTR_DPL_SHIFT);
4370 f32Bit = false;
4371 }
4372 else
4373 {
4374 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4375 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4376 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4377 | (3 << X86DESCATTR_DPL_SHIFT);
4378 }
4379 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4380 * what it really ignores. RF and VM are hinted at being zero, by AMD.
4381 * Intel says: RFLAGS := (R11 & 3C7FD7H) | 2; */
4382 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4383 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4384 }
4385 else
4386 {
4387 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4388 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4389 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4390 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4391 | (3 << X86DESCATTR_DPL_SHIFT);
4392 }
4393 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4394 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4395 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4396 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4397 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4398
4399 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4400 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4401 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4402    /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4403 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4404 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4405 * on sysret. */
4406 /** @todo intel documents SS.BASE and SS.LIMIT as being set as well as the
4407 * TYPE, S, DPL, P, B and G flag bits. */
4408
4409 if (!f32Bit)
4410 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4411 | (3 << IEM_F_X86_CPL_SHIFT)
4412 | IEM_F_MODE_X86_64BIT;
4413 else
4414 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4415 | (3 << IEM_F_X86_CPL_SHIFT)
4416 | IEM_F_MODE_X86_32BIT_PROT
4417 /** @todo sort out the SS.BASE/LIM/ATTR claim by AMD and maybe we can switch to
4418 * iemCalc32BitFlatIndicatorDsEs and move this up into the above branch. */
4419 | iemCalc32BitFlatIndicator(pVCpu);
4420
4421 /* Flush the prefetch buffer. */
4422 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4423
4424/** @todo single step */
4425 return VINF_SUCCESS;
4426}
4427
4428
4429/**
4430 * Implements SYSENTER (Intel, 32-bit AMD).
4431 */
4432IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4433{
4434 RT_NOREF(cbInstr);
4435
4436 /*
4437 * Check preconditions.
4438 *
4439     * Note that CPUs described in the documentation may load a few odder values
4440 * into CS and SS than we allow here. This has yet to be checked on real
4441 * hardware.
4442 */
4443 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4444 {
4445        Log(("sysenter: not supported -> #UD\n"));
4446 return iemRaiseUndefinedOpcode(pVCpu);
4447 }
4448 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4449 {
4450 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4451 return iemRaiseGeneralProtectionFault0(pVCpu);
4452 }
4453 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4454 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4455 {
4456 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4457 return iemRaiseUndefinedOpcode(pVCpu);
4458 }
4459 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4460 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4461 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4462 {
4463 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4464 return iemRaiseGeneralProtectionFault0(pVCpu);
4465 }
4466
4467 /* This test isn't in the docs, it's just a safeguard against missing
4468 canonical checks when writing the registers. */
4469 if (RT_LIKELY( !fIsLongMode
4470 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4471 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4472 { /* likely */ }
4473 else
4474 {
4475 Log(("sysenter: SYSENTER_EIP = %#RX64 or/and SYSENTER_ESP = %#RX64 not canonical -> #GP(0)\n",
4476 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4477 return iemRaiseUndefinedOpcode(pVCpu);
4478 }
4479
4480/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4481
4482 /*
4483 * Update registers and commit.
4484 */
4485 if (fIsLongMode)
4486 {
4487        Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4488 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4489 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4490 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4491 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4492 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4493 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4494 | IEM_F_MODE_X86_64BIT;
4495 }
4496 else
4497 {
4498        Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
4499 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4500 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4501 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4502 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4503 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4504 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4505 | IEM_F_MODE_X86_32BIT
4506 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4507 }
4508 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4509 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4510 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4511 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4512 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4513
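    /* SYSENTER defines SS as SYSENTER_CS + 8; both CS and SS become flat
       4 GiB ring-0 segments. */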
4514 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4515 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4516 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4517 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4518 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4519 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4520 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4521
4522 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4523 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4524 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4525
4526 /* Flush the prefetch buffer. */
4527 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4528
4529/** @todo single stepping */
4530 return VINF_SUCCESS;
4531}
4532
4533
4534/**
4535 * Implements SYSEXIT (Intel, 32-bit AMD).
4536 *
4537 * @param enmEffOpSize The effective operand size.
4538 */
4539IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4540{
4541 RT_NOREF(cbInstr);
4542
4543 /*
4544 * Check preconditions.
4545 *
4546     * Note that CPUs described in the documentation may load a few odder values
4547 * into CS and SS than we allow here. This has yet to be checked on real
4548 * hardware.
4549 */
4550 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4551 {
4552        Log(("sysexit: not supported -> #UD\n"));
4553 return iemRaiseUndefinedOpcode(pVCpu);
4554 }
4555 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4556 {
4557 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4558 return iemRaiseGeneralProtectionFault0(pVCpu);
4559 }
4560 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4561 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4562 {
4563 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4564 return iemRaiseUndefinedOpcode(pVCpu);
4565 }
4566 if (IEM_GET_CPL(pVCpu) != 0)
4567 {
4568 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4569 return iemRaiseGeneralProtectionFault0(pVCpu);
4570 }
4571 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4572 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4573 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4574 {
4575 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4576 return iemRaiseGeneralProtectionFault0(pVCpu);
4577 }
4578
4579 /*
4580 * Update registers and commit.
4581 */
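    /* SYSEXIT selectors are derived from SYSENTER_CS: a 32-bit exit uses
       CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24, a 64-bit exit uses
       CS = SYSENTER_CS + 32 and SS = SYSENTER_CS + 40, all with RPL 3. */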
4582 if (enmEffOpSize == IEMMODE_64BIT)
4583 {
4584        Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4585             pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rdx));
4586 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4587 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4588 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4589 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4590 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4591 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4592 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4593 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4594
4595 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4596 | (3 << IEM_F_X86_CPL_SHIFT)
4597 | IEM_F_MODE_X86_64BIT;
4598 }
4599 else
4600 {
4601        Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4602 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4603 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4604 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4605 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4606 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4607 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4608 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4609 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4610 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4611
4612 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4613 | (3 << IEM_F_X86_CPL_SHIFT)
4614 | IEM_F_MODE_X86_32BIT
4615 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4616 }
4617 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4618 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4619 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4620
4621 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4622 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4623 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4624 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4625 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4626 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4627
4628/** @todo single stepping */
4629
4630 /* Flush the prefetch buffer. */
4631 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4632
4633 return VINF_SUCCESS;
4634}
4635
4636
4637/**
4638 * Completes a MOV SReg,XXX or POP SReg instruction.
4639 *
4640 * When not modifying SS or when we're already in an interrupt shadow we
4641 * can update RIP and finish the instruction the normal way.
4642 *
4643 * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
4644 * both TF and DBx events. The TF will be ignored while the DBx ones will
4645 * be delayed till the next instruction boundary. For more details see
4646 * @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching Stacks}.
4647 */
4648DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
4649{
4650 if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4651 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4652
4653 iemRegAddToRip(pVCpu, cbInstr);
4654 pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
4655 CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
4656
4657 return VINF_SUCCESS;
4658}
4659
4660
4661/**
4662 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4663 *
4664 * @param pVCpu The cross context virtual CPU structure of the calling
4665 * thread.
4666 * @param iSegReg The segment register number (valid).
4667 * @param uSel The new selector value.
4668 */
4669static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4670{
4671 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4672 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4673 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4674
4675 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4676
4677 /*
4678 * Real mode and V8086 mode are easy.
4679 */
4680 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4681 {
4682 *pSel = uSel;
4683 pHid->u64Base = (uint32_t)uSel << 4;
4684 pHid->ValidSel = uSel;
4685 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4686#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4687 /** @todo Does the CPU actually load limits and attributes in the
4688 * real/V8086 mode segment load case? It doesn't for CS in far
4689 * jumps... Affects unreal mode. */
4690 pHid->u32Limit = 0xffff;
4691 pHid->Attr.u = 0;
4692 pHid->Attr.n.u1Present = 1;
4693 pHid->Attr.n.u1DescType = 1;
4694 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4695 ? X86_SEL_TYPE_RW
4696 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4697#endif
4698
4699 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
4700 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
4701 { /* likely */ }
4702 else if (uSel != 0)
4703 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4704 else
4705 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4706 | iemCalc32BitFlatIndicator(pVCpu);
4707 }
4708 /*
4709 * Protected / long mode - null segment.
4710 *
4711 * Check if it's a null segment selector value first, that's OK for DS, ES,
4712 * FS and GS. If not null, then we have to load and parse the descriptor.
4713 */
4714 else if (!(uSel & X86_SEL_MASK_OFF_RPL))
4715 {
4716 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4717 if (iSegReg == X86_SREG_SS)
4718 {
4719 /* In 64-bit kernel mode, the stack can be 0 because of the way
4720               interrupts are dispatched. AMD seems to have a slightly more
4721               relaxed relationship to SS.RPL than Intel does. */
4722 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4723 if ( !IEM_IS_64BIT_CODE(pVCpu)
4724 || IEM_GET_CPL(pVCpu) > 2
4725 || ( uSel != IEM_GET_CPL(pVCpu)
4726 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4727 {
4728 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4729 return iemRaiseGeneralProtectionFault0(pVCpu);
4730 }
4731 }
4732
4733 *pSel = uSel; /* Not RPL, remember :-) */
4734 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4735 if (iSegReg == X86_SREG_SS)
4736 pHid->Attr.u |= IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT;
4737
4738 /* This will affect the FLAT 32-bit mode flag: */
4739 if ( iSegReg < X86_SREG_FS
4740 && IEM_IS_32BIT_CODE(pVCpu))
4741 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4742 }
4743 /*
4744 * Protected / long mode.
4745 */
4746 else
4747 {
4748 /* Fetch the descriptor. */
4749 IEMSELDESC Desc;
4750 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4751 if (rcStrict != VINF_SUCCESS)
4752 return rcStrict;
4753
4754 /* Check GPs first. */
4755 if (!Desc.Legacy.Gen.u1DescType)
4756 {
4757 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4758 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4759 }
4760 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4761 {
4762 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4763 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4764 {
4765 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4766 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4767 }
4768 if ((uSel & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
4769 {
4770 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, IEM_GET_CPL(pVCpu)));
4771 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4772 }
4773 if (Desc.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
4774 {
4775 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4776 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4777 }
4778 }
4779 else
4780 {
4781 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4782 {
4783 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4784 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4785 }
4786 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4787 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4788 {
4789#if 0 /* this is what intel says. */
4790 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4791 && IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4792 {
4793 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4794 iSegReg, uSel, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4795 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4796 }
4797#else /* this is what makes more sense. */
4798 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4799 {
4800 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4801 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4802 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4803 }
4804 if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4805 {
4806 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4807 iSegReg, uSel, IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4808 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4809 }
4810#endif
4811 }
4812 }
4813
4814 /* Is it there? */
4815 if (!Desc.Legacy.Gen.u1Present)
4816 {
4817 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4818 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4819 }
4820
4821 /* The base and limit. */
4822 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4823 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4824
4825 /*
4826 * Ok, everything checked out fine. Now set the accessed bit before
4827 * committing the result into the registers.
4828 */
4829 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4830 {
4831 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4832 if (rcStrict != VINF_SUCCESS)
4833 return rcStrict;
4834 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4835 }
4836
4837 /* commit */
4838 *pSel = uSel;
4839 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4840 pHid->u32Limit = cbLimit;
4841 pHid->u64Base = u64Base;
4842 pHid->ValidSel = uSel;
4843 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4844
4845 /** @todo check if the hidden bits are loaded correctly for 64-bit
4846 * mode. */
4847
4848 /* This will affect the FLAT 32-bit mode flag: */
4849 if ( iSegReg < X86_SREG_FS
4850 && IEM_IS_32BIT_CODE(pVCpu))
4851 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4852 | iemCalc32BitFlatIndicator(pVCpu);
4853 }
4854
4855 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4856 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4857 return VINF_SUCCESS;
4858}
4859
4860
4861/**
4862 * Implements 'mov SReg, r/m'.
4863 *
4864 * @param iSegReg The segment register number (valid).
4865 * @param uSel The new selector value.
4866 */
4867IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4868{
4869 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4870 if (rcStrict == VINF_SUCCESS)
4871 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4872 return rcStrict;
4873}
4874
4875
4876/**
4877 * Implements 'pop SReg'.
4878 *
4879 * @param iSegReg The segment register number (valid).
4880 * @param enmEffOpSize The efficient operand size (valid).
4881 */
4882IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4883{
4884 VBOXSTRICTRC rcStrict;
4885
4886 /*
4887 * Read the selector off the stack and join paths with mov ss, reg.
4888 */
4889 RTUINT64U TmpRsp;
4890 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4891 switch (enmEffOpSize)
4892 {
4893 case IEMMODE_16BIT:
4894 {
4895 uint16_t uSel;
4896 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4897 if (rcStrict == VINF_SUCCESS)
4898 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4899 break;
4900 }
4901
4902 case IEMMODE_32BIT:
4903 {
4904 uint32_t u32Value;
4905 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4906 if (rcStrict == VINF_SUCCESS)
4907 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
4908 break;
4909 }
4910
4911 case IEMMODE_64BIT:
4912 {
4913 uint64_t u64Value;
4914 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4915 if (rcStrict == VINF_SUCCESS)
4916 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
4917 break;
4918 }
4919 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4920 }
4921
4922 /*
4923 * If the load succeeded, commit the stack change and finish the instruction.
4924 */
4925 if (rcStrict == VINF_SUCCESS)
4926 {
4927 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4928 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4929 }
4930
4931 return rcStrict;
4932}
4933
4934
4935/**
4936 * Implements lgs, lfs, les, lds & lss.
4937 */
4938IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
4939{
4940 /*
4941 * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
4942 */
4943 /** @todo verify and test that mov, pop and lXs works the segment
4944 * register loading in the exact same way. */
4945 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4946 if (rcStrict == VINF_SUCCESS)
4947 {
4948 switch (enmEffOpSize)
4949 {
4950 case IEMMODE_16BIT:
4951 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4952 break;
4953 case IEMMODE_32BIT:
4954 case IEMMODE_64BIT:
4955 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4956 break;
4957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4958 }
4959 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4960 }
4961 return rcStrict;
4962}
4963
4964
4965/**
4966 * Helper for VERR, VERW, LAR and LSL; fetches the descriptor from the GDT/LDT.
4967 *
4968 * @retval VINF_SUCCESS on success.
4969 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4970 * @retval iemMemFetchSysU64 return value.
4971 *
4972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4973 * @param uSel The selector value.
4974 * @param fAllowSysDesc Whether system descriptors are OK or not.
4975 * @param pDesc Where to return the descriptor on success.
4976 */
4977static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4978{
4979 pDesc->Long.au64[0] = 0;
4980 pDesc->Long.au64[1] = 0;
4981
4982 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4983 return VINF_IEM_SELECTOR_NOT_OK;
4984
4985 /* Within the table limits? */
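    /* Note: OR-ing in the low three selector bits (TI + RPL) below makes the limit
       check cover the last byte of the 8-byte descriptor entry. */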
4986 RTGCPTR GCPtrBase;
4987 if (uSel & X86_SEL_LDT)
4988 {
4989 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4990 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4991 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4992 return VINF_IEM_SELECTOR_NOT_OK;
4993 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4994 }
4995 else
4996 {
4997 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4998 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4999 return VINF_IEM_SELECTOR_NOT_OK;
5000 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
5001 }
5002
5003 /* Fetch the descriptor. */
5004 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
5005 if (rcStrict != VINF_SUCCESS)
5006 return rcStrict;
5007 if (!pDesc->Legacy.Gen.u1DescType)
5008 {
5009 if (!fAllowSysDesc)
5010 return VINF_IEM_SELECTOR_NOT_OK;
5011 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
5012 {
5013 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
5014 if (rcStrict != VINF_SUCCESS)
5015 return rcStrict;
5016 }
5017
5018 }
5019
5020 return VINF_SUCCESS;
5021}
5022
5023
5024/**
5025 * Implements verr (fWrite = false) and verw (fWrite = true).
5026 */
5027IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
5028{
5029 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
5030
5031 /** @todo figure whether the accessed bit is set or not. */
5032
5033 bool fAccessible = true;
5034 IEMSELDESC Desc;
5035 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
5036 if (rcStrict == VINF_SUCCESS)
5037 {
5038 /* Check the descriptor, order doesn't matter much here. */
5039 if ( !Desc.Legacy.Gen.u1DescType
5040 || !Desc.Legacy.Gen.u1Present)
5041 fAccessible = false;
5042 else
5043 {
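            /* VERW requires a writable data segment (code bit clear, write bit set);
               VERR requires a readable one - data segments always are, code segments
               only when the read bit is set. */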
5044 if ( fWrite
5045 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
5046 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
5047 fAccessible = false;
5048
5049 /** @todo testcase for the conforming behavior. */
5050 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
5051 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
5052 {
5053 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
5054 fAccessible = false;
5055 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
5056 fAccessible = false;
5057 }
5058 }
5059
5060 }
5061 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5062 fAccessible = false;
5063 else
5064 return rcStrict;
5065
5066 /* commit */
5067 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
5068
5069 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5070}
5071
5072
5073/**
5074 * Implements LAR and LSL with 64-bit operand size.
5075 *
5076 * @returns VINF_SUCCESS.
5077 * @param pu64Dst Pointer to the destination register.
5078 * @param uSel The selector to load details for.
5079 * @param fIsLar true = LAR, false = LSL.
5080 */
5081IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
5082{
5083 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
5084
5085 /** @todo figure whether the accessed bit is set or not. */
5086
5087 bool fDescOk = true;
5088 IEMSELDESC Desc;
5089 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
5090 if (rcStrict == VINF_SUCCESS)
5091 {
5092 /*
5093 * Check the descriptor type.
5094 */
5095 if (!Desc.Legacy.Gen.u1DescType)
5096 {
5097 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
5098 {
5099 if (Desc.Long.Gen.u5Zeros)
5100 fDescOk = false;
5101 else
5102 switch (Desc.Long.Gen.u4Type)
5103 {
5104 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
5105 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
5106 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
5107 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
5108 break;
5109 case AMD64_SEL_TYPE_SYS_CALL_GATE:
5110 fDescOk = fIsLar;
5111 break;
5112 default:
5113 fDescOk = false;
5114 break;
5115 }
5116 }
5117 else
5118 {
5119 switch (Desc.Long.Gen.u4Type)
5120 {
5121 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
5122 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
5123 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
5124 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
5125 case X86_SEL_TYPE_SYS_LDT:
5126 break;
5127 case X86_SEL_TYPE_SYS_286_CALL_GATE:
5128 case X86_SEL_TYPE_SYS_TASK_GATE:
5129 case X86_SEL_TYPE_SYS_386_CALL_GATE:
5130 fDescOk = fIsLar;
5131 break;
5132 default:
5133 fDescOk = false;
5134 break;
5135 }
5136 }
5137 }
5138 if (fDescOk)
5139 {
5140 /*
5141             * Check the RPL/DPL/CPL interaction.
5142 */
5143 /** @todo testcase for the conforming behavior. */
5144 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
5145 || !Desc.Legacy.Gen.u1DescType)
5146 {
5147 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
5148 fDescOk = false;
5149 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
5150 fDescOk = false;
5151 }
5152 }
5153
5154 if (fDescOk)
5155 {
5156 /*
5157 * All fine, start committing the result.
5158 */
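            /* LAR returns the attribute dword of the descriptor masked with 0x00ffff00,
               LSL the granularity-adjusted (byte) segment limit. */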
5159 if (fIsLar)
5160 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
5161 else
5162 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
5163 }
5164
5165 }
5166 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5167 fDescOk = false;
5168 else
5169 return rcStrict;
5170
5171 /* commit flags value and advance rip. */
5172 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5173 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5174}
5175
5176
5177/**
5178 * Implements LAR and LSL with 16-bit operand size.
5179 *
5180 * @returns VINF_SUCCESS.
5181 * @param pu16Dst Pointer to the destination register.
5182 * @param uSel The selector to load details for.
5183 * @param fIsLar true = LAR, false = LSL.
5184 */
5185IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5186{
5187 uint64_t u64TmpDst = *pu16Dst;
5188 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5189 *pu16Dst = u64TmpDst;
5190 return VINF_SUCCESS;
5191}
5192
5193
5194/**
5195 * Implements lgdt.
5196 *
5197 * @param iEffSeg The segment of the new gdtr contents.
5198 * @param GCPtrEffSrc The address of the new gdtr contents.
5199 * @param enmEffOpSize The effective operand size.
5200 */
5201IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5202{
5203 if (IEM_GET_CPL(pVCpu) != 0)
5204 return iemRaiseGeneralProtectionFault0(pVCpu);
5205 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5206
5207 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5208 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5209 {
5210 Log(("lgdt: Guest intercept -> VM-exit\n"));
5211 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5212 }
5213
5214 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5215 {
5216 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5217 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5218 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5219 }
5220
5221 /*
5222 * Fetch the limit and base address.
5223 */
5224 uint16_t cbLimit;
5225 RTGCPTR GCPtrBase;
5226 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5227 if (rcStrict == VINF_SUCCESS)
5228 {
5229 if ( !IEM_IS_64BIT_CODE(pVCpu)
5230 || X86_IS_CANONICAL(GCPtrBase))
5231 {
5232 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5233 if (rcStrict == VINF_SUCCESS)
5234 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5235 }
5236 else
5237 {
5238 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5239 return iemRaiseGeneralProtectionFault0(pVCpu);
5240 }
5241 }
5242 return rcStrict;
5243}
5244
5245
5246/**
5247 * Implements sgdt.
5248 *
5249 * @param iEffSeg The segment where to store the gdtr content.
5250 * @param GCPtrEffDst The address where to store the gdtr content.
5251 */
5252IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5253{
5254 /*
5255 * Join paths with sidt.
5256 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5257 * you really must know.
5258 */
5259 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5260 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5261 {
5262 Log(("sgdt: Guest intercept -> VM-exit\n"));
5263 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5264 }
5265
5266 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5267 {
5268 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5269 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5270 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5271 }
5272
5273 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5274 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5275 if (rcStrict == VINF_SUCCESS)
5276 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5277 return rcStrict;
5278}
5279
5280
5281/**
5282 * Implements lidt.
5283 *
5284 * @param iEffSeg The segment of the new idtr contents.
5285 * @param GCPtrEffSrc The address of the new idtr contents.
5286 * @param enmEffOpSize The effective operand size.
5287 */
5288IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5289{
5290 if (IEM_GET_CPL(pVCpu) != 0)
5291 return iemRaiseGeneralProtectionFault0(pVCpu);
5292 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5293
5294 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5295 {
5296 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5297 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5298 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5299 }
5300
5301 /*
5302 * Fetch the limit and base address.
5303 */
5304 uint16_t cbLimit;
5305 RTGCPTR GCPtrBase;
5306 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5307 if (rcStrict == VINF_SUCCESS)
5308 {
5309 if ( !IEM_IS_64BIT_CODE(pVCpu)
5310 || X86_IS_CANONICAL(GCPtrBase))
5311 {
5312 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5313 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5314 }
5315 else
5316 {
5317 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5318 return iemRaiseGeneralProtectionFault0(pVCpu);
5319 }
5320 }
5321 return rcStrict;
5322}
5323
5324
5325/**
5326 * Implements sidt.
5327 *
5328 * @param iEffSeg The segment where to store the idtr content.
5329 * @param GCPtrEffDst The address where to store the idtr content.
5330 */
5331IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5332{
5333 /*
5334 * Join paths with sgdt.
5335 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5336 * you really must know.
5337 */
5338 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5339 {
5340 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5341 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5342 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5343 }
5344
5345 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5346 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5347 if (rcStrict == VINF_SUCCESS)
5348 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5349 return rcStrict;
5350}
5351
5352
5353/**
5354 * Implements lldt.
5355 *
5356 * @param uNewLdt The new LDT selector value.
5357 */
5358IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5359{
5360 /*
5361 * Check preconditions.
5362 */
5363 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5364 {
5365 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5366 return iemRaiseUndefinedOpcode(pVCpu);
5367 }
5368 if (IEM_GET_CPL(pVCpu) != 0)
5369 {
5370 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, IEM_GET_CPL(pVCpu)));
5371 return iemRaiseGeneralProtectionFault0(pVCpu);
5372 }
5373 /* Nested-guest VMX intercept. */
5374 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5375 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5376 {
5377 Log(("lldt: Guest intercept -> VM-exit\n"));
5378 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5379 }
5380 if (uNewLdt & X86_SEL_LDT)
5381 {
5382 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5383 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5384 }
5385
5386 /*
5387 * Now, loading a NULL selector is easy.
5388 */
5389 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5390 {
5391 /* Nested-guest SVM intercept. */
5392 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5393 {
5394 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5395 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5396 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5397 }
5398
5399 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5400 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5401 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5402 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5403 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5404 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5405 {
5406 /* AMD-V seems to leave the base and limit alone. */
5407 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5408 }
5409 else
5410 {
5411 /* VT-x (Intel 3960x) seems to be doing the following. */
5412 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5413 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5414 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5415 }
5416
5417 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5418 }
5419
5420 /*
5421 * Read the descriptor.
5422 */
5423 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5424 IEMSELDESC Desc;
5425 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5426 if (rcStrict != VINF_SUCCESS)
5427 return rcStrict;
5428
5429 /* Check GPs first. */
5430 if (Desc.Legacy.Gen.u1DescType)
5431 {
5432 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5433 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5434 }
5435 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5436 {
5437 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5438 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5439 }
5440 uint64_t u64Base;
5441 if (!IEM_IS_LONG_MODE(pVCpu))
5442 u64Base = X86DESC_BASE(&Desc.Legacy);
5443 else
5444 {
5445 if (Desc.Long.Gen.u5Zeros)
5446 {
5447 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5448 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5449 }
5450
5451 u64Base = X86DESC64_BASE(&Desc.Long);
5452 if (!IEM_IS_CANONICAL(u64Base))
5453 {
5454 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5455 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5456 }
5457 }
5458
5459 /* NP */
5460 if (!Desc.Legacy.Gen.u1Present)
5461 {
5462 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5463 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5464 }
5465
5466 /* Nested-guest SVM intercept. */
5467 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5468 {
5469 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5470 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5471 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5472 }
5473
5474 /*
5475 * It checks out alright, update the registers.
5476 */
5477/** @todo check if the actual value is loaded or if the RPL is dropped */
5478 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5479 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5480 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5481 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5482 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5483 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5484
5485 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5486}
5487
5488
5489/**
5490 * Implements sldt GReg
5491 *
5492 * @param iGReg The general register to store the LDTR value in.
5493 * @param enmEffOpSize The operand size.
5494 */
5495IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5496{
5497 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5498 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5499 {
5500 Log(("sldt: Guest intercept -> VM-exit\n"));
5501 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5502 }
5503
5504 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5505
5506 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5507 switch (enmEffOpSize)
5508 {
5509 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5510 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5511 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5512 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5513 }
5514 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5515}
5516
5517
5518/**
5519 * Implements sldt mem.
5520 *
5521 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5522 * @param GCPtrEffDst Where to store the 16-bit LDTR selector value.
5523 */
5524IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5525{
5526 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5527
5528 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5529 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5530 if (rcStrict == VINF_SUCCESS)
5531 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5532 return rcStrict;
5533}
5534
5535
5536/**
5537 * Implements ltr.
5538 *
5539 * @param uNewTr The new TSS selector value.
5540 */
5541IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5542{
5543 /*
5544 * Check preconditions.
5545 */
5546 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5547 {
5548 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5549 return iemRaiseUndefinedOpcode(pVCpu);
5550 }
5551 if (IEM_GET_CPL(pVCpu) != 0)
5552 {
5553 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, IEM_GET_CPL(pVCpu)));
5554 return iemRaiseGeneralProtectionFault0(pVCpu);
5555 }
5556 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5557 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5558 {
5559 Log(("ltr: Guest intercept -> VM-exit\n"));
5560 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5561 }
5562 if (uNewTr & X86_SEL_LDT)
5563 {
5564 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5565 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5566 }
5567 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5568 {
5569 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5570 return iemRaiseGeneralProtectionFault0(pVCpu);
5571 }
5572 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5573 {
5574 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5575 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5576 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5577 }
5578
5579 /*
5580 * Read the descriptor.
5581 */
5582 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5583 IEMSELDESC Desc;
5584 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5585 if (rcStrict != VINF_SUCCESS)
5586 return rcStrict;
5587
5588 /* Check GPs first. */
5589 if (Desc.Legacy.Gen.u1DescType)
5590 {
5591 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5592 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5593 }
5594 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5595 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5596 || IEM_IS_LONG_MODE(pVCpu)) )
5597 {
5598 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5599 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5600 }
5601 uint64_t u64Base;
5602 if (!IEM_IS_LONG_MODE(pVCpu))
5603 u64Base = X86DESC_BASE(&Desc.Legacy);
5604 else
5605 {
5606 if (Desc.Long.Gen.u5Zeros)
5607 {
5608 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5609 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5610 }
5611
5612 u64Base = X86DESC64_BASE(&Desc.Long);
5613 if (!IEM_IS_CANONICAL(u64Base))
5614 {
5615 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5616 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5617 }
5618 }
5619
5620 /* NP */
5621 if (!Desc.Legacy.Gen.u1Present)
5622 {
5623 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5624 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5625 }
5626
5627 /*
5628 * Set it busy.
5629 * Note! Intel says this should lock down the whole descriptor, but we'll
5630     *       restrict ourselves to 32-bit for now due to lack of inline
5631 * assembly and such.
5632 */
5633 void *pvDesc;
5634 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
5635 IEM_ACCESS_DATA_RW, 0);
5636 if (rcStrict != VINF_SUCCESS)
5637 return rcStrict;
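    /* The busy flag is bit 41 of the descriptor (bit 1 of the type byte at offset 5).
       The switch below adjusts the base pointer and bit offset so that the atomic
       bit-set always operates on a 32-bit aligned address. */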
5638 switch ((uintptr_t)pvDesc & 3)
5639 {
5640 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5641 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5642 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5643 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5644 }
5645 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5646 if (rcStrict != VINF_SUCCESS)
5647 return rcStrict;
5648 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5649
5650 /*
5651 * It checks out alright, update the registers.
5652 */
5653/** @todo check if the actual value is loaded or if the RPL is dropped */
5654 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5655 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5656 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5657 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5658 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5659 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5660
5661 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5662}
5663
5664
5665/**
5666 * Implements str GReg
5667 *
5668 * @param iGReg The general register to store the TR value in.
5669 * @param enmEffOpSize The operand size.
5670 */
5671IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5672{
5673 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5674 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5675 {
5676 Log(("str_reg: Guest intercept -> VM-exit\n"));
5677 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5678 }
5679
5680 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5681
5682 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5683 switch (enmEffOpSize)
5684 {
5685 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5686 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5687 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5689 }
5690 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5691}
5692
5693
5694/**
5695 * Implements str mem.
5696 *
5697 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5698 * @param GCPtrEffDst Where to store the 16-bit TR selector value.
5699 */
5700IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5701{
5702 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5703 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5704 {
5705 Log(("str_mem: Guest intercept -> VM-exit\n"));
5706 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5707 }
5708
5709 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5710
5711 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5712 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5713 if (rcStrict == VINF_SUCCESS)
5714 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5715 return rcStrict;
5716}
5717
5718
5719/**
5720 * Implements mov GReg,CRx.
5721 *
5722 * @param iGReg The general register to store the CRx value in.
5723 * @param iCrReg The CRx register to read (valid).
5724 */
5725IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5726{
5727 if (IEM_GET_CPL(pVCpu) != 0)
5728 return iemRaiseGeneralProtectionFault0(pVCpu);
5729 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5730
5731 if (IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5732 {
5733 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5734 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5735 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5736 }
5737
5738 /* Read it. */
5739 uint64_t crX;
5740 switch (iCrReg)
5741 {
5742 case 0:
5743 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5744 crX = pVCpu->cpum.GstCtx.cr0;
5745 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5746 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5747 break;
5748 case 2:
5749 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5750 crX = pVCpu->cpum.GstCtx.cr2;
5751 break;
5752 case 3:
5753 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5754 crX = pVCpu->cpum.GstCtx.cr3;
5755 break;
5756 case 4:
5757 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5758 crX = pVCpu->cpum.GstCtx.cr4;
5759 break;
5760 case 8:
5761 {
5762 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5763#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5764 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5765 {
5766 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5767 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5768 return rcStrict;
5769
5770 /*
5771 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR is copied
5772 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5773 * are cleared.
5774 *
5775 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5776 */
5777 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5778 {
5779 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5780 crX = (uTpr >> 4) & 0xf;
5781 break;
5782 }
5783 }
5784#endif
5785#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5786 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5787 {
5788 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5789 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5790 {
5791 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5792 break;
5793 }
5794 }
5795#endif
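            /* CR8 reads return bits 7:4 of the APIC TPR (task-priority register). */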
5796 uint8_t uTpr;
5797 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5798 if (RT_SUCCESS(rc))
5799 crX = uTpr >> 4;
5800 else
5801 crX = 0;
5802 break;
5803 }
5804 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5805 }
5806
5807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5808 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5809 {
5810 switch (iCrReg)
5811 {
5812 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5813 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5814 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5815
5816 case 3:
5817 {
5818 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5819 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5820 return rcStrict;
5821 break;
5822 }
5823 }
5824 }
5825#endif
5826
5827 /* Store it. */
5828 if (IEM_IS_64BIT_CODE(pVCpu))
5829 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5830 else
5831 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5832
5833 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5834}
5835
5836
5837/**
5838 * Implements smsw GReg.
5839 *
5840 * @param iGReg The general register to store the CRx value in.
5841 * @param enmEffOpSize The operand size.
5842 */
5843IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5844{
5845 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5846
5847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5848 uint64_t u64MaskedCr0;
5849 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5850 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5851 else
5852 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5853 uint64_t const u64GuestCr0 = u64MaskedCr0;
5854#else
5855 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5856#endif
5857
5858 switch (enmEffOpSize)
5859 {
5860 case IEMMODE_16BIT:
5861 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5862 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0;
5863 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5864 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xffe0;
5865 else
5866 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xfff0;
5867 break;
5868
5869 case IEMMODE_32BIT:
5870 *(uint32_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)u64GuestCr0;
5871 break;
5872
5873 case IEMMODE_64BIT:
5874 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = u64GuestCr0;
5875 break;
5876
5877 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5878 }
5879
5880 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5881}
5882
5883
5884/**
5885 * Implements smsw mem.
5886 *
5887 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5888 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5889 */
5890IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5891{
5892 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5893
5894#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5895 uint64_t u64MaskedCr0;
5896 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5897 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5898 else
5899 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5900 uint64_t const u64GuestCr0 = u64MaskedCr0;
5901#else
5902 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5903#endif
5904
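    /* 286 and 386 return the reserved MSW bits as set (0xfff0 / 0xffe0 respectively);
       later CPUs return CR0 bits 15:0 unmodified. */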
5905 uint16_t u16Value;
5906 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5907 u16Value = (uint16_t)u64GuestCr0;
5908 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5909 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5910 else
5911 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5912
5913 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5914 if (rcStrict == VINF_SUCCESS)
5915 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5916 return rcStrict;
5917}
5918
5919
5920/**
5921 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
5922 */
5923#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
5924 do \
5925 { \
5926 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
5927 if (RT_SUCCESS(rcX)) \
5928 { /* likely */ } \
5929 else \
5930 { \
5931 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
5932 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
5933 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
5934 } \
5935 } while (0)
5936
5937
5938/**
5939 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5940 *
5941 * @param iCrReg The CRx register to write (valid).
5942 * @param uNewCrX The new value.
5943 * @param enmAccessCrX The instruction that caused the CrX load.
5944 * @param iGReg The general register in case of a 'mov CRx,GReg'
5945 * instruction.
5946 */
5947IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5948{
5949 VBOXSTRICTRC rcStrict;
5950 int rc;
5951#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5952 RT_NOREF2(iGReg, enmAccessCrX);
5953#endif
5954
5955 /*
5956 * Try store it.
5957 * Unfortunately, CPUM only does a tiny bit of the work.
5958 */
5959 switch (iCrReg)
5960 {
5961 case 0:
5962 {
5963 /*
5964 * Perform checks.
5965 */
5966 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5967
5968 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5969 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5970
5971 /* ET is hardcoded on 486 and later. */
5972 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5973 uNewCrX |= X86_CR0_ET;
5974 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5975 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5976 {
5977 uNewCrX &= fValid;
5978 uNewCrX |= X86_CR0_ET;
5979 }
5980 else
5981 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5982
5983 /* Check for reserved bits. */
5984 if (uNewCrX & ~(uint64_t)fValid)
5985 {
5986 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5987 return iemRaiseGeneralProtectionFault0(pVCpu);
5988 }
5989
5990 /* Check for invalid combinations. */
5991 if ( (uNewCrX & X86_CR0_PG)
5992 && !(uNewCrX & X86_CR0_PE) )
5993 {
5994 Log(("Trying to set CR0.PG without CR0.PE\n"));
5995 return iemRaiseGeneralProtectionFault0(pVCpu);
5996 }
5997
5998 if ( !(uNewCrX & X86_CR0_CD)
5999 && (uNewCrX & X86_CR0_NW) )
6000 {
6001 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
6002 return iemRaiseGeneralProtectionFault0(pVCpu);
6003 }
6004
6005 if ( !(uNewCrX & X86_CR0_PG)
6006 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
6007 {
6008 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
6009 return iemRaiseGeneralProtectionFault0(pVCpu);
6010 }
6011
6012 /* Long mode consistency checks. */
6013 if ( (uNewCrX & X86_CR0_PG)
6014 && !(uOldCrX & X86_CR0_PG)
6015 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
6016 {
6017 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
6018 {
6019                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
6020 return iemRaiseGeneralProtectionFault0(pVCpu);
6021 }
6022 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
6023 {
6024                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
6025 return iemRaiseGeneralProtectionFault0(pVCpu);
6026 }
6027 }
6028
6029 /** @todo testcase: what happens if we disable paging while in 64-bit code? */
6030
6031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6032 /* Check for bits that must remain set or cleared in VMX operation,
6033 see Intel spec. 23.8 "Restrictions on VMX operation". */
6034 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
6035 {
6036 uint64_t const uCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
6037 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
6038 {
6039 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
6040 return iemRaiseGeneralProtectionFault0(pVCpu);
6041 }
6042
6043 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
6044 if (uNewCrX & ~uCr0Fixed1)
6045 {
6046 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
6047 return iemRaiseGeneralProtectionFault0(pVCpu);
6048 }
6049 }
6050#endif
6051
6052 /*
6053 * SVM nested-guest CR0 write intercepts.
6054 */
6055 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
6056 {
6057 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6058 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6059 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
6060 }
6061 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
6062 {
6063 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
6064 if ( enmAccessCrX == IEMACCESSCRX_LMSW
6065 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
6066 {
6067 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
6068 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
6069 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6070 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
6071 }
6072 }
6073
6074 /*
6075 * Change EFER.LMA if entering or leaving long mode.
6076 */
6077 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
6078 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
6079 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
6080 {
6081 if (uNewCrX & X86_CR0_PG)
6082 NewEFER |= MSR_K6_EFER_LMA;
6083 else
6084 NewEFER &= ~MSR_K6_EFER_LMA;
6085
6086 CPUMSetGuestEFER(pVCpu, NewEFER);
6087 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
6088 }
6089
6090 /*
6091 * Inform PGM.
6092 */
6093 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
6094 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
6095 {
6096 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
6097 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
6098 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6099 { /* likely */ }
6100 else
6101 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6102 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6103 AssertRCReturn(rc, rc);
6104 /* ignore informational status codes */
6105 }
6106
6107 /*
6108 * Change CR0.
6109 */
6110 CPUMSetGuestCR0(pVCpu, uNewCrX);
6111 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
6112
6113 /* Update the fExec flags if PE changed. */
6114 if ((uNewCrX ^ uOldCrX) & X86_CR0_PE)
6115 iemRecalcExecModeAndCplFlags(pVCpu);
6116
6117 /*
6118 * Inform PGM some more...
6119 */
6120 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6121 false /* fForce */);
6122 break;
6123 }
6124
6125 /*
6126 * CR2 can be changed without any restrictions.
6127 */
6128 case 2:
6129 {
6130 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
6131 {
6132 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6133 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6134 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
6135 }
6136 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
6137 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
6138 rcStrict = VINF_SUCCESS;
6139 break;
6140 }
6141
6142 /*
6143 * CR3 is relatively simple, although AMD and Intel have different
6144     * accounts of how setting reserved bits is handled. We take Intel's
6145 * word for the lower bits and AMD's for the high bits (63:52). The
6146 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
6147 * on this.
6148 */
6149 /** @todo Testcase: Setting reserved bits in CR3, especially before
6150 * enabling paging. */
6151 case 3:
6152 {
6153 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
6154
6155 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
6156 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
6157 && (uNewCrX & RT_BIT_64(63)))
6158 {
6159 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
6160 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
6161 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
6162 * Paging-Structure Caches". */
6163 uNewCrX &= ~RT_BIT_64(63);
6164 }
6165
6166 /* Check / mask the value. */
6167#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6168 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
6169 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
6170 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
6171 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
6172#else
6173 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
6174#endif
6175 if (uNewCrX & fInvPhysMask)
6176 {
6177 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6178 * very vague in this area. As mentioned above, need testcase on real
6179 * hardware... Sigh. */
6180 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6181 return iemRaiseGeneralProtectionFault0(pVCpu);
6182 }
6183
6184 uint64_t fValid;
6185 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6186 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6187 {
6188 /** @todo Redundant? This value has already been validated above. */
6189 fValid = UINT64_C(0x000fffffffffffff);
6190 }
6191 else
6192 fValid = UINT64_C(0xffffffff);
6193 if (uNewCrX & ~fValid)
6194 {
6195 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6196 uNewCrX, uNewCrX & ~fValid));
6197 uNewCrX &= fValid;
6198 }
6199
6200 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6201 {
6202 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6203 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6204 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6205 }
6206
6207 /* Inform PGM. */
6208 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6209 {
6210 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6211 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6212 { /* likely */ }
6213 else
6214 {
6215 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6216 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6217 }
6218 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6219 AssertRCReturn(rc, rc);
6220 /* ignore informational status codes */
6221 }
6222
6223 /* Make the change. */
6224 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6225 AssertRCSuccessReturn(rc, rc);
6226
6227 rcStrict = VINF_SUCCESS;
6228 break;
6229 }
6230
6231 /*
6232 * CR4 is a bit more tedious as there are bits which cannot be cleared
6233 * under some circumstances and such.
6234 */
6235 case 4:
6236 {
6237 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6238 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6239
6240 /* Reserved bits. */
6241 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6242 if (uNewCrX & ~(uint64_t)fValid)
6243 {
6244 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6245 return iemRaiseGeneralProtectionFault0(pVCpu);
6246 }
6247
6248 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6249 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6250
6251 /* PCIDE check. */
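            /* PCIDE may only be set in long mode and only while CR3[11:0] (the current PCID) is zero. */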
6252 if ( fPcide
6253 && ( !fLongMode
6254 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6255 {
6256                Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#RX64\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6257 return iemRaiseGeneralProtectionFault0(pVCpu);
6258 }
6259
6260 /* PAE check. */
6261 if ( fLongMode
6262 && (uOldCrX & X86_CR4_PAE)
6263 && !(uNewCrX & X86_CR4_PAE))
6264 {
6265                Log(("Trying to clear CR4.PAE while long mode is active\n"));
6266 return iemRaiseGeneralProtectionFault0(pVCpu);
6267 }
6268
6269 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6270 {
6271 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6272 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6273 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6274 }
6275
6276 /* Check for bits that must remain set or cleared in VMX operation,
6277 see Intel spec. 23.8 "Restrictions on VMX operation". */
6278 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
6279 {
6280 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6281 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6282 {
6283 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6284 return iemRaiseGeneralProtectionFault0(pVCpu);
6285 }
6286
6287 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6288 if (uNewCrX & ~uCr4Fixed1)
6289 {
6290 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6291 return iemRaiseGeneralProtectionFault0(pVCpu);
6292 }
6293 }
6294
6295 /*
6296 * Notify PGM.
6297 */
6298 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6299 {
6300 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6301 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6302 { /* likely */ }
6303 else
6304 {
6305 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6306 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6307 }
6308 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6309 AssertRCReturn(rc, rc);
6310 /* ignore informational status codes */
6311 }
6312
6313 /*
6314 * Change it.
6315 */
6316 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6317 AssertRCSuccessReturn(rc, rc);
6318 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6319
6320 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6321 false /* fForce */);
6322 break;
6323 }
6324
6325 /*
6326 * CR8 maps to the APIC TPR.
6327 */
6328 case 8:
6329 {
6330 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6331 if (uNewCrX & ~(uint64_t)0xf)
6332 {
6333 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6334 return iemRaiseGeneralProtectionFault0(pVCpu);
6335 }
6336
6337#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6338 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6339 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6340 {
6341 /*
6342             * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6343             * are copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6344 * cleared. Following this the processor performs TPR virtualization.
6345 *
6346 * However, we should not perform TPR virtualization immediately here but
6347 * after this instruction has completed.
6348 *
6349 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6350 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6351 */
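                /* E.g. a source operand value of 5 yields uTpr = 0x50 here (illustrative value). */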
6352 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6353 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6354 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6355 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6356 rcStrict = VINF_SUCCESS;
6357 break;
6358 }
6359#endif
6360
6361#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6362 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6363 {
6364 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6365 {
6366 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6367 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6368 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6369 }
6370
6371 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6372 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6373 {
6374 rcStrict = VINF_SUCCESS;
6375 break;
6376 }
6377 }
6378#endif
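            /* CR8 carries TPR bits 7:4, so e.g. writing 5 to CR8 sets the xAPIC TPR to 0x50
               (illustrative value). */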
6379 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6380 APICSetTpr(pVCpu, u8Tpr);
6381 rcStrict = VINF_SUCCESS;
6382 break;
6383 }
6384
6385 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6386 }
6387
6388 /*
6389 * Advance the RIP on success.
6390 */
6391 if (RT_SUCCESS(rcStrict))
6392 {
6393 if (rcStrict != VINF_SUCCESS)
6394 iemSetPassUpStatus(pVCpu, rcStrict);
6395 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6396 }
6397
6398 return rcStrict;
6399}
6400
6401
6402/**
6403 * Implements mov CRx,GReg.
6404 *
6405 * @param iCrReg The CRx register to write (valid).
6406 * @param iGReg The general register to load the CRx value from.
6407 */
6408IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6409{
6410 if (IEM_GET_CPL(pVCpu) != 0)
6411 return iemRaiseGeneralProtectionFault0(pVCpu);
6412 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6413
6414 /*
6415 * Read the new value from the source register and call common worker.
6416 */
6417 uint64_t uNewCrX;
6418 if (IEM_IS_64BIT_CODE(pVCpu))
6419 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6420 else
6421 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6422
6423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6424 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6425 {
6426 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6427 switch (iCrReg)
6428 {
6429 case 0:
6430 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6431 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6432 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6433 }
6434 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6435 return rcStrict;
6436 }
6437#endif
6438
6439 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6440}
6441
6442
6443/**
6444 * Implements 'LMSW r/m16'
6445 *
6446 * @param u16NewMsw The new value.
6447 * @param GCPtrEffDst The guest-linear address of the source operand in case
6448 *                      of a memory operand. For a register operand, pass
6449 * NIL_RTGCPTR.
6450 */
6451IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6452{
6453 if (IEM_GET_CPL(pVCpu) != 0)
6454 return iemRaiseGeneralProtectionFault0(pVCpu);
6455 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6456 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6457
6458#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6459 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6460 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6461 {
6462 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6463 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6464 return rcStrict;
6465 }
6466#else
6467 RT_NOREF_PV(GCPtrEffDst);
6468#endif
6469
6470 /*
6471 * Compose the new CR0 value and call common worker.
6472 */
6473 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6474 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
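    /* Note that CR0.PE is not masked out of the old value above, so LMSW can set but
       never clear PE; e.g. loading a MSW of 0 leaves an already set PE bit untouched
       (illustrative example). */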
6475 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6476}
6477
6478
6479/**
6480 * Implements 'CLTS'.
6481 */
6482IEM_CIMPL_DEF_0(iemCImpl_clts)
6483{
6484 if (IEM_GET_CPL(pVCpu) != 0)
6485 return iemRaiseGeneralProtectionFault0(pVCpu);
6486
6487 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6488 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6489 uNewCr0 &= ~X86_CR0_TS;
6490
6491#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6492 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6493 {
6494 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6495 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6496 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6497 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6498 return rcStrict;
6499 }
6500#endif
6501
6502 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6503}
6504
6505
6506/**
6507 * Implements mov GReg,DRx.
6508 *
6509 * @param iGReg The general register to store the DRx value in.
6510 * @param iDrReg The DRx register to read (0-7).
6511 */
6512IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6513{
6514#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6515 /*
6516 * Check nested-guest VMX intercept.
6517     * Unlike most other intercepts, the Mov DRx intercept takes precedence
6518 * over CPL and CR4.DE and even DR4/DR5 checks.
6519 *
6520 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6521 */
6522 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6523 {
6524 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6525 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6526 return rcStrict;
6527 }
6528#endif
6529
6530 /*
6531 * Check preconditions.
6532 */
6533 /* Raise GPs. */
6534 if (IEM_GET_CPL(pVCpu) != 0)
6535 return iemRaiseGeneralProtectionFault0(pVCpu);
6536 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6537 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6538
6539     /** @todo \#UD outside ring-0 too? */
6540 if (iDrReg == 4 || iDrReg == 5)
6541 {
6542 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6543 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6544 {
6545 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6546 return iemRaiseGeneralProtectionFault0(pVCpu);
6547 }
6548 iDrReg += 2;
6549 }
6550
6551 /* Raise #DB if general access detect is enabled. */
6552 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6553 {
6554 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6555 return iemRaiseDebugException(pVCpu);
6556 }
6557
6558 /*
6559 * Read the debug register and store it in the specified general register.
6560 */
6561 uint64_t drX;
6562 switch (iDrReg)
6563 {
6564 case 0:
6565 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6566 drX = pVCpu->cpum.GstCtx.dr[0];
6567 break;
6568 case 1:
6569 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6570 drX = pVCpu->cpum.GstCtx.dr[1];
6571 break;
6572 case 2:
6573 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6574 drX = pVCpu->cpum.GstCtx.dr[2];
6575 break;
6576 case 3:
6577 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6578 drX = pVCpu->cpum.GstCtx.dr[3];
6579 break;
6580 case 6:
6581 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6582 drX = pVCpu->cpum.GstCtx.dr[6];
6583 drX |= X86_DR6_RA1_MASK;
6584 drX &= ~X86_DR6_RAZ_MASK;
6585 break;
6586 case 7:
6587 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6588 drX = pVCpu->cpum.GstCtx.dr[7];
6589             drX |= X86_DR7_RA1_MASK;
6590 drX &= ~X86_DR7_RAZ_MASK;
6591 break;
6592 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6593 }
6594
6595 /** @todo SVM nested-guest intercept for DR8-DR15? */
6596 /*
6597 * Check for any SVM nested-guest intercepts for the DRx read.
6598 */
6599 if (IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6600 {
6601 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6602 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6603 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6604 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6605 }
6606
6607 if (IEM_IS_64BIT_CODE(pVCpu))
6608 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
6609 else
6610 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
6611
6612 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6613}
6614
6615
6616/**
6617 * Implements mov DRx,GReg.
6618 *
6619 * @param iDrReg The DRx register to write (valid).
6620 * @param iGReg The general register to load the DRx value from.
6621 */
6622IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6623{
6624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6625 /*
6626 * Check nested-guest VMX intercept.
6627     * Unlike most other intercepts, the Mov DRx intercept takes precedence
6628 * over CPL and CR4.DE and even DR4/DR5 checks.
6629 *
6630 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6631 */
6632 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6633 {
6634 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6635 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6636 return rcStrict;
6637 }
6638#endif
6639
6640 /*
6641 * Check preconditions.
6642 */
6643 if (IEM_GET_CPL(pVCpu) != 0)
6644 return iemRaiseGeneralProtectionFault0(pVCpu);
6645 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6646 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6647
6648 if (iDrReg == 4 || iDrReg == 5)
6649 {
6650 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6651 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6652 {
6653 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6654 return iemRaiseGeneralProtectionFault0(pVCpu);
6655 }
6656 iDrReg += 2;
6657 }
6658
6659 /* Raise #DB if general access detect is enabled. */
6660     /** @todo is the \#DB for DR7.GD raised before the \#GP for reserved high bits
6661      *        in DR7/DR6? */
6662 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6663 {
6664 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6665 return iemRaiseDebugException(pVCpu);
6666 }
6667
6668 /*
6669 * Read the new value from the source register.
6670 */
6671 uint64_t uNewDrX;
6672 if (IEM_IS_64BIT_CODE(pVCpu))
6673 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6674 else
6675 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6676
6677 /*
6678 * Adjust it.
6679 */
6680 switch (iDrReg)
6681 {
6682 case 0:
6683 case 1:
6684 case 2:
6685 case 3:
6686 /* nothing to adjust */
6687 break;
6688
6689 case 6:
6690 if (uNewDrX & X86_DR6_MBZ_MASK)
6691 {
6692 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6693 return iemRaiseGeneralProtectionFault0(pVCpu);
6694 }
6695 uNewDrX |= X86_DR6_RA1_MASK;
6696 uNewDrX &= ~X86_DR6_RAZ_MASK;
6697 break;
6698
6699 case 7:
6700 if (uNewDrX & X86_DR7_MBZ_MASK)
6701 {
6702 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6703 return iemRaiseGeneralProtectionFault0(pVCpu);
6704 }
6705 uNewDrX |= X86_DR7_RA1_MASK;
6706 uNewDrX &= ~X86_DR7_RAZ_MASK;
6707 break;
6708
6709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6710 }
6711
6712 /** @todo SVM nested-guest intercept for DR8-DR15? */
6713 /*
6714 * Check for any SVM nested-guest intercepts for the DRx write.
6715 */
6716 if (IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6717 {
6718 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6719 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6720 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6721 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6722 }
6723
6724 /*
6725 * Do the actual setting.
6726 */
6727 if (iDrReg < 4)
6728 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6729 else if (iDrReg == 6)
6730 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6731
6732 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6733 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6734
6735 /*
6736 * Re-init hardware breakpoint summary if it was DR7 that got changed.
6737 */
6738 if (iDrReg == 7)
6739 iemRecalcExecDbgFlags(pVCpu);
6740
6741 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6742}
6743
6744
6745/**
6746 * Implements mov GReg,TRx.
6747 *
6748 * @param iGReg The general register to store the
6749 * TRx value in.
6750 * @param iTrReg The TRx register to read (6/7).
6751 */
6752IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6753{
6754 /*
6755 * Check preconditions. NB: This instruction is 386/486 only.
6756 */
6757
6758 /* Raise GPs. */
6759 if (IEM_GET_CPL(pVCpu) != 0)
6760 return iemRaiseGeneralProtectionFault0(pVCpu);
6761 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6762
6763 if (iTrReg < 6 || iTrReg > 7)
6764 {
6765 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6766 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6767 return iemRaiseGeneralProtectionFault0(pVCpu);
6768 }
6769
6770 /*
6771 * Read the test register and store it in the specified general register.
6772 * This is currently a dummy implementation that only exists to satisfy
6773 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6774 * TR6/TR7 registers. Software which actually depends on the TR values
6775 * (different on 386/486) is exceedingly rare.
6776 */
6777 uint64_t trX;
6778 switch (iTrReg)
6779 {
6780 case 6:
6781 trX = 0; /* Currently a dummy. */
6782 break;
6783 case 7:
6784 trX = 0; /* Currently a dummy. */
6785 break;
6786 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6787 }
6788
6789 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)trX;
6790
6791 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6792}
6793
6794
6795/**
6796 * Implements mov TRx,GReg.
6797 *
6798 * @param iTrReg The TRx register to write (valid).
6799 * @param iGReg The general register to load the TRx
6800 * value from.
6801 */
6802IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6803{
6804 /*
6805 * Check preconditions. NB: This instruction is 386/486 only.
6806 */
6807
6808 /* Raise GPs. */
6809 if (IEM_GET_CPL(pVCpu) != 0)
6810 return iemRaiseGeneralProtectionFault0(pVCpu);
6811 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6812
6813 if (iTrReg < 6 || iTrReg > 7)
6814 {
6815 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6816 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6817 return iemRaiseGeneralProtectionFault0(pVCpu);
6818 }
6819
6820 /*
6821 * Read the new value from the source register.
6822 */
6823 uint64_t uNewTrX;
6824 if (IEM_IS_64BIT_CODE(pVCpu)) /** @todo err... 64-bit 386? */
6825 uNewTrX = iemGRegFetchU64(pVCpu, iGReg);
6826 else
6827 uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6828
6829 /*
6830 * Here we would do the actual setting if this weren't a dummy implementation.
6831 * This is currently a dummy implementation that only exists to prevent
6832 * old debuggers like WDEB386 or OS/2 KDB from crashing.
6833 */
6834 RT_NOREF(uNewTrX);
6835
6836 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6837}
6838
6839
6840/**
6841 * Implements 'INVLPG m'.
6842 *
6843 * @param GCPtrPage The effective address of the page to invalidate.
6844 * @remarks Updates the RIP.
6845 */
6846IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6847{
6848 /* ring-0 only. */
6849 if (IEM_GET_CPL(pVCpu) != 0)
6850 return iemRaiseGeneralProtectionFault0(pVCpu);
6851 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6852 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6853
6854#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6855 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6856 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6857 {
6858 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6859 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6860 }
6861#endif
6862
6863 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6864 {
6865 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6866 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6867 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6868 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6869 }
6870
6871 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6872 if (rc == VINF_SUCCESS)
6873 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6874 if (rc == VINF_PGM_SYNC_CR3)
6875 {
6876 iemSetPassUpStatus(pVCpu, rc);
6877 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6878 }
6879
6880 AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6881 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6882 return rc;
6883}
6884
6885
6886/**
6887 * Implements INVPCID.
6888 *
6889 * @param iEffSeg The segment of the invpcid descriptor.
6890 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
6891 * @param uInvpcidType The invalidation type.
6892 * @remarks Updates the RIP.
6893 */
6894IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
6895{
6896 /*
6897 * Check preconditions.
6898 */
6899 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6900 return iemRaiseUndefinedOpcode(pVCpu);
6901
6902 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6903 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6904 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID))
6905 {
6906 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6907 return iemRaiseUndefinedOpcode(pVCpu);
6908 }
6909
6910 if (IEM_GET_CPL(pVCpu) != 0)
6911 {
6912 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6913 return iemRaiseGeneralProtectionFault0(pVCpu);
6914 }
6915
6916 if (IEM_IS_V86_MODE(pVCpu))
6917 {
6918 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6919 return iemRaiseGeneralProtectionFault0(pVCpu);
6920 }
6921
6922 /*
6923 * Check nested-guest intercept.
6924 *
6925 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6926 * both set. We have already checked the former earlier in this function.
6927 *
6928 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6929 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6930 */
6931 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6932 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6933 {
6934 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6935 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6936 }
6937
6938 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6939 {
6940 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
6941 return iemRaiseGeneralProtectionFault0(pVCpu);
6942 }
6943 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6944
6945 /*
6946 * Fetch the invpcid descriptor from guest memory.
6947 */
6948 RTUINT128U uDesc;
6949 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6950 if (rcStrict == VINF_SUCCESS)
6951 {
6952 /*
6953 * Validate the descriptor.
6954 */
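        /* Descriptor layout (128 bits): bits 11:0 = PCID, bits 63:12 = reserved (MBZ),
           bits 127:64 = linear address (only used by the individual-address type). */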
6955 if (uDesc.s.Lo > 0xfff)
6956 {
6957 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6958 return iemRaiseGeneralProtectionFault0(pVCpu);
6959 }
6960
6961 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6962 uint8_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff);
6963 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6964 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
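        /* Invalidation types: 0 = individual address, 1 = single PCID context,
           2 = all contexts incl. global translations, 3 = all contexts excl. global ones. */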
6965 switch (uInvpcidType)
6966 {
6967 case X86_INVPCID_TYPE_INDV_ADDR:
6968 {
6969 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6970 {
6971 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
6972 return iemRaiseGeneralProtectionFault0(pVCpu);
6973 }
6974 if ( !(uCr4 & X86_CR4_PCIDE)
6975 && uPcid != 0)
6976 {
6977 Log(("invpcid: invalid pcid %#x\n", uPcid));
6978 return iemRaiseGeneralProtectionFault0(pVCpu);
6979 }
6980
6981 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
6982 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6983 break;
6984 }
6985
6986 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6987 {
6988 if ( !(uCr4 & X86_CR4_PCIDE)
6989 && uPcid != 0)
6990 {
6991 Log(("invpcid: invalid pcid %#x\n", uPcid));
6992 return iemRaiseGeneralProtectionFault0(pVCpu);
6993 }
6994 /* Invalidate all mappings associated with PCID except global translations. */
6995 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6996 break;
6997 }
6998
6999 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
7000 {
7001 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
7002 break;
7003 }
7004
7005 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
7006 {
7007 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
7008 break;
7009 }
7010 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7011 }
7012 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7013 }
7014 return rcStrict;
7015}
7016
7017
7018/**
7019 * Implements INVD.
7020 */
7021IEM_CIMPL_DEF_0(iemCImpl_invd)
7022{
7023 if (IEM_GET_CPL(pVCpu) != 0)
7024 {
7025 Log(("invd: CPL != 0 -> #GP(0)\n"));
7026 return iemRaiseGeneralProtectionFault0(pVCpu);
7027 }
7028
7029 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7030 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
7031
7032 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0, cbInstr);
7033
7034 /* We currently take no action here. */
7035 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7036}
7037
7038
7039/**
7040 * Implements WBINVD.
7041 */
7042IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
7043{
7044 if (IEM_GET_CPL(pVCpu) != 0)
7045 {
7046 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
7047 return iemRaiseGeneralProtectionFault0(pVCpu);
7048 }
7049
7050 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7051 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
7052
7053 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0, cbInstr);
7054
7055 /* We currently take no action here. */
7056 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7057}
7058
7059
7060/** Opcode 0x0f 0xaa. */
7061IEM_CIMPL_DEF_0(iemCImpl_rsm)
7062{
7063 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0, cbInstr);
7064 NOREF(cbInstr);
7065 return iemRaiseUndefinedOpcode(pVCpu);
7066}
7067
7068
7069/**
7070 * Implements RDTSC.
7071 */
7072IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
7073{
7074 /*
7075 * Check preconditions.
7076 */
7077 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
7078 return iemRaiseUndefinedOpcode(pVCpu);
7079
7080 if (IEM_GET_CPL(pVCpu) != 0)
7081 {
7082 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7083 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7084 {
7085 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7086 return iemRaiseGeneralProtectionFault0(pVCpu);
7087 }
7088 }
7089
7090 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7091 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7092 {
7093 Log(("rdtsc: Guest intercept -> VM-exit\n"));
7094 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
7095 }
7096
7097 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
7098 {
7099 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
7100 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7101 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7102 }
7103
7104 /*
7105 * Do the job.
7106 */
7107 uint64_t uTicks = TMCpuTickGet(pVCpu);
7108#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7109 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7110#endif
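    /* EDX:EAX = TSC; e.g. a tick count of 0x0000000123456789 yields EAX=0x23456789
       and EDX=0x00000001 (illustrative value). */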
7111 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7112 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7113 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
7114 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7115}
7116
7117
7118/**
7119 * Implements RDTSCP.
7120 */
7121IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
7122{
7123 /*
7124 * Check preconditions.
7125 */
7126 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
7127 return iemRaiseUndefinedOpcode(pVCpu);
7128
7129 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7130 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP))
7131 {
7132 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
7133 return iemRaiseUndefinedOpcode(pVCpu);
7134 }
7135
7136 if (IEM_GET_CPL(pVCpu) != 0)
7137 {
7138 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7139 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7140 {
7141 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7142 return iemRaiseGeneralProtectionFault0(pVCpu);
7143 }
7144 }
7145
7146 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7147 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7148 {
7149 Log(("rdtscp: Guest intercept -> VM-exit\n"));
7150 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
7151 }
7152 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
7153 {
7154 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
7155 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7156 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7157 }
7158
7159 /*
7160 * Do the job.
7161 * Query the MSR first in case of trips to ring-3.
7162 */
7163 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
7164 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
7165 if (rcStrict == VINF_SUCCESS)
7166 {
7167 /* Low dword of the TSC_AUX msr only. */
7168 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7169
7170 uint64_t uTicks = TMCpuTickGet(pVCpu);
7171#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7172 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7173#endif
7174 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7175 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7176 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
7177 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7178 }
7179 return rcStrict;
7180}
7181
7182
7183/**
7184 * Implements RDPMC.
7185 */
7186IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
7187{
7188 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7189
7190 if ( IEM_GET_CPL(pVCpu) != 0
7191 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7192 return iemRaiseGeneralProtectionFault0(pVCpu);
7193
7194 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7195 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7196 {
7197 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7198 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7199 }
7200
7201 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7202 {
7203 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7204 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7205 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7206 }
7207
7208 /** @todo Emulate performance counters, for now just return 0. */
7209 pVCpu->cpum.GstCtx.rax = 0;
7210 pVCpu->cpum.GstCtx.rdx = 0;
7211 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7212 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7213 * ecx but see @bugref{3472}! */
7214
7215 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7216}
7217
7218
7219/**
7220 * Implements RDMSR.
7221 */
7222IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7223{
7224 /*
7225 * Check preconditions.
7226 */
7227 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7228 return iemRaiseUndefinedOpcode(pVCpu);
7229 if (IEM_GET_CPL(pVCpu) != 0)
7230 return iemRaiseGeneralProtectionFault0(pVCpu);
7231
7232 /*
7233 * Check nested-guest intercepts.
7234 */
7235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7236 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7237 {
7238 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7239 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7240 }
7241#endif
7242
7243#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7244 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7245 {
7246 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */, cbInstr);
7247 if (rcStrict == VINF_SVM_VMEXIT)
7248 return VINF_SUCCESS;
7249 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7250 {
7251 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7252 return rcStrict;
7253 }
7254 }
7255#endif
7256
7257 /*
7258 * Do the job.
7259 */
7260 RTUINT64U uValue;
7261 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7262 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7263
7264 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7265 if (rcStrict == VINF_SUCCESS)
7266 {
7267 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7268 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7269 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7270
7271 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7272 }
7273
7274#ifndef IN_RING3
7275 /* Deferred to ring-3. */
7276 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7277 {
7278 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7279 return rcStrict;
7280 }
7281#endif
7282
7283     /* Often an unimplemented MSR or MSR bit, so worth logging. */
7284 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7285 {
7286 pVCpu->iem.s.cLogRelRdMsr++;
7287 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7288 }
7289 else
7290 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7291 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7292 return iemRaiseGeneralProtectionFault0(pVCpu);
7293}
7294
7295
7296/**
7297 * Implements WRMSR.
7298 */
7299IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7300{
7301 /*
7302 * Check preconditions.
7303 */
7304 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7305 return iemRaiseUndefinedOpcode(pVCpu);
7306 if (IEM_GET_CPL(pVCpu) != 0)
7307 return iemRaiseGeneralProtectionFault0(pVCpu);
7308
7309 RTUINT64U uValue;
7310 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7311 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7312
7313 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7314
7315 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7316 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7317
7318 /*
7319 * Check nested-guest intercepts.
7320 */
7321#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7322 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7323 {
7324 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7325 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7326 }
7327#endif
7328
7329#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7330 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7331 {
7332 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */, cbInstr);
7333 if (rcStrict == VINF_SVM_VMEXIT)
7334 return VINF_SUCCESS;
7335 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7336 {
7337 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7338 return rcStrict;
7339 }
7340 }
7341#endif
7342
7343 /*
7344 * Do the job.
7345 */
7346 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7347 if (rcStrict == VINF_SUCCESS)
7348 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7349
7350#ifndef IN_RING3
7351 /* Deferred to ring-3. */
7352 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7353 {
7354 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7355 return rcStrict;
7356 }
7357#endif
7358
7359     /* Often an unimplemented MSR or MSR bit, so worth logging. */
7360 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7361 {
7362 pVCpu->iem.s.cLogRelWrMsr++;
7363 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7364 }
7365 else
7366 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7367 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7368 return iemRaiseGeneralProtectionFault0(pVCpu);
7369}
7370
7371
7372/**
7373 * Implements 'IN eAX, port'.
7374 *
7375 * @param u16Port The source port.
7376 * @param cbReg The register size.
7377 * @param bImmAndEffAddrMode Bit 7: Whether the port was specified through an
7378 * immediate operand or the implicit DX register.
7379 * Bits 3-0: Effective address mode.
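 *                      E.g. the decoder would pass 0x80 | IEMMODE_32BIT for an
 *                      'in al, imm8' executed in 32-bit code (illustrative example).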
7380 */
7381IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7382{
7383 /*
7384 * CPL check
7385 */
7386 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7387 if (rcStrict != VINF_SUCCESS)
7388 return rcStrict;
7389
7390 /*
7391 * Check VMX nested-guest IO intercept.
7392 */
7393#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7394 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7395 {
7396 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7397 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7398 return rcStrict;
7399 }
7400#else
7401 RT_NOREF(bImmAndEffAddrMode);
7402#endif
7403
7404 /*
7405 * Check SVM nested-guest IO intercept.
7406 */
7407#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7408 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7409 {
7410 uint8_t cAddrSizeBits;
7411 switch (bImmAndEffAddrMode & 0xf)
7412 {
7413 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7414 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7415 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7416 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7417 }
7418 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7419 false /* fRep */, false /* fStrIo */, cbInstr);
7420 if (rcStrict == VINF_SVM_VMEXIT)
7421 return VINF_SUCCESS;
7422 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7423 {
7424 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7425 VBOXSTRICTRC_VAL(rcStrict)));
7426 return rcStrict;
7427 }
7428 }
7429#else
7430 RT_NOREF(bImmAndEffAddrMode);
7431#endif
7432
7433 /*
7434 * Perform the I/O.
7435 */
7436 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7437 uint32_t u32Value = 0;
7438 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, cbReg);
7439 if (IOM_SUCCESS(rcStrict))
7440 {
7441 switch (cbReg)
7442 {
7443 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7444 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7445 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7446 default: AssertFailedReturn(VERR_IEM_IPE_3);
7447 }
7448
7449 pVCpu->iem.s.cPotentialExits++;
7450 if (rcStrict != VINF_SUCCESS)
7451 iemSetPassUpStatus(pVCpu, rcStrict);
7452
7453 /*
7454 * Check for I/O breakpoints before we complete the instruction.
7455 */
7456 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7457 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7458 && X86_DR7_ANY_RW_IO(fDr7)
7459 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7460 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7461 && rcStrict == VINF_SUCCESS))
7462 {
7463 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7464 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7465 }
7466
7467 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7468 }
7469
7470 return rcStrict;
7471}
7472
7473
7474/**
7475 * Implements 'IN eAX, DX'.
7476 *
7477 * @param cbReg The register size.
7478 * @param enmEffAddrMode Effective address mode.
7479 */
7480IEM_CIMPL_DEF_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7481{
7482 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7483}
7484
7485
7486/**
7487 * Implements 'OUT port, eAX'.
7488 *
7489 * @param u16Port The destination port.
7490 * @param cbReg The register size.
7491 * @param bImmAndEffAddrMode Bit 7: Whether the port was specified through an
7492 * immediate operand or the implicit DX register.
7493 * Bits 3-0: Effective address mode.
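 *                      The DX form (iemCImpl_out_DX_eAX below) passes 0 | enmEffAddrMode
 *                      since the port comes from DX rather than an immediate.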
7494 */
7495IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7496{
7497 /*
7498 * CPL check
7499 */
7500 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7501 if (rcStrict != VINF_SUCCESS)
7502 return rcStrict;
7503
7504 /*
7505 * Check VMX nested-guest I/O intercept.
7506 */
7507#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7508 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7509 {
7510 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7511 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7512 return rcStrict;
7513 }
7514#else
7515 RT_NOREF(bImmAndEffAddrMode);
7516#endif
7517
7518 /*
7519 * Check SVM nested-guest I/O intercept.
7520 */
7521#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7522 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7523 {
7524 uint8_t cAddrSizeBits;
7525 switch (bImmAndEffAddrMode & 0xf)
7526 {
7527 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7528 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7529 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7531 }
7532 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7533 false /* fRep */, false /* fStrIo */, cbInstr);
7534 if (rcStrict == VINF_SVM_VMEXIT)
7535 return VINF_SUCCESS;
7536 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7537 {
7538 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7539 VBOXSTRICTRC_VAL(rcStrict)));
7540 return rcStrict;
7541 }
7542 }
7543#else
7544 RT_NOREF(bImmAndEffAddrMode);
7545#endif
7546
7547 /*
7548 * Perform the I/O.
7549 */
7550 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7551 uint32_t u32Value;
7552 switch (cbReg)
7553 {
7554 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7555 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7556 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7557 default: AssertFailedReturn(VERR_IEM_IPE_4);
7558 }
7559 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, cbReg);
7560 if (IOM_SUCCESS(rcStrict))
7561 {
7562 pVCpu->iem.s.cPotentialExits++;
7563 if (rcStrict != VINF_SUCCESS)
7564 iemSetPassUpStatus(pVCpu, rcStrict);
7565
7566 /*
7567 * Check for I/O breakpoints before we complete the instruction.
7568 */
7569 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7570 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7571 && X86_DR7_ANY_RW_IO(fDr7)
7572 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7573 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7574 && rcStrict == VINF_SUCCESS))
7575 {
7576 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7577 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7578 }
7579
7580 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7581 }
7582 return rcStrict;
7583}
7584
7585
7586/**
7587 * Implements 'OUT DX, eAX'.
7588 *
7589 * @param cbReg The register size.
7590 * @param enmEffAddrMode Effective address mode.
7591 */
7592IEM_CIMPL_DEF_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7593{
7594 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7595}
7596
7597
7598/**
7599 * Implements 'CLI'.
7600 */
7601IEM_CIMPL_DEF_0(iemCImpl_cli)
7602{
7603 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7604#ifdef LOG_ENABLED
7605 uint32_t const fEflOld = fEfl;
7606#endif
7607
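    /* Summary of the checks below: in protected mode IF is cleared when CPL <= IOPL,
       VIF is cleared instead when CPL=3 and CR4.PVI is set, otherwise #GP(0); in V8086
       mode IF is cleared when IOPL=3, VIF when IOPL < 3 and CR4.VME is set, otherwise
       #GP(0); real mode always clears IF. */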
7608 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7609 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7610 {
7611 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7612 if (!(fEfl & X86_EFL_VM))
7613 {
7614 if (IEM_GET_CPL(pVCpu) <= uIopl)
7615 fEfl &= ~X86_EFL_IF;
7616 else if ( IEM_GET_CPL(pVCpu) == 3
7617 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7618 fEfl &= ~X86_EFL_VIF;
7619 else
7620 return iemRaiseGeneralProtectionFault0(pVCpu);
7621 }
7622 /* V8086 */
7623 else if (uIopl == 3)
7624 fEfl &= ~X86_EFL_IF;
7625 else if ( uIopl < 3
7626 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7627 fEfl &= ~X86_EFL_VIF;
7628 else
7629 return iemRaiseGeneralProtectionFault0(pVCpu);
7630 }
7631 /* real mode */
7632 else
7633 fEfl &= ~X86_EFL_IF;
7634
7635 /* Commit. */
7636 IEMMISC_SET_EFL(pVCpu, fEfl);
7637 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7638 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl));
7639 return rcStrict;
7640}
7641
7642
7643/**
7644 * Implements 'STI'.
7645 */
7646IEM_CIMPL_DEF_0(iemCImpl_sti)
7647{
7648 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7649 uint32_t const fEflOld = fEfl;
7650
7651 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7652 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7653 {
7654 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7655 if (!(fEfl & X86_EFL_VM))
7656 {
7657 if (IEM_GET_CPL(pVCpu) <= uIopl)
7658 fEfl |= X86_EFL_IF;
7659 else if ( IEM_GET_CPL(pVCpu) == 3
7660 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7661 && !(fEfl & X86_EFL_VIP) )
7662 fEfl |= X86_EFL_VIF;
7663 else
7664 return iemRaiseGeneralProtectionFault0(pVCpu);
7665 }
7666 /* V8086 */
7667 else if (uIopl == 3)
7668 fEfl |= X86_EFL_IF;
7669 else if ( uIopl < 3
7670 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7671 && !(fEfl & X86_EFL_VIP) )
7672 fEfl |= X86_EFL_VIF;
7673 else
7674 return iemRaiseGeneralProtectionFault0(pVCpu);
7675 }
7676 /* real mode */
7677 else
7678 fEfl |= X86_EFL_IF;
7679
7680 /*
7681 * Commit.
7682 *
7683 * Note! Setting the shadow interrupt flag must be done after RIP updating.
7684 */
7685 IEMMISC_SET_EFL(pVCpu, fEfl);
7686 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7687 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7688 {
7689         /** @todo only set the shadow flag if it was clear before? */
7690 CPUMSetInInterruptShadowSti(&pVCpu->cpum.GstCtx);
7691 }
7692 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7693 return rcStrict;
7694}
7695
7696
7697/**
7698 * Implements 'HLT'.
7699 */
7700IEM_CIMPL_DEF_0(iemCImpl_hlt)
7701{
7702 if (IEM_GET_CPL(pVCpu) != 0)
7703 return iemRaiseGeneralProtectionFault0(pVCpu);
7704
7705 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7706 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7707 {
7708 Log2(("hlt: Guest intercept -> VM-exit\n"));
7709 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7710 }
7711
7712 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7713 {
7714 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7715 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7716 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7717 }
7718
7719 /** @todo finish: This ASSUMES that iemRegAddToRipAndFinishingClearingRF won't
7720 * be returning any status codes relating to non-guest events being raised, as
7721 * we'll mess up the guest HALT otherwise. */
7722 VBOXSTRICTRC rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7723 if (rcStrict == VINF_SUCCESS)
7724 rcStrict = VINF_EM_HALT;
7725 return rcStrict;
7726}
7727
7728
7729/**
7730 * Implements 'MONITOR'.
7731 */
7732IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7733{
7734 /*
7735 * Permission checks.
7736 */
7737 if (IEM_GET_CPL(pVCpu) != 0)
7738 {
7739 Log2(("monitor: CPL != 0\n"));
7740 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7741 }
7742 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7743 {
7744 Log2(("monitor: Not in CPUID\n"));
7745 return iemRaiseUndefinedOpcode(pVCpu);
7746 }
7747
7748 /*
7749 * Check VMX guest-intercept.
7750 * This should be considered a fault-like VM-exit.
7751 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7752 */
7753 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7754 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7755 {
7756 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7757 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7758 }
7759
7760 /*
7761 * Gather the operands and validate them.
7762 */
7763 RTGCPTR GCPtrMem = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7764 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7765 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7766/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7767 * \#GP first. */
7768 if (uEcx != 0)
7769 {
7770 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7771 return iemRaiseGeneralProtectionFault0(pVCpu);
7772 }
7773
7774 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7775 if (rcStrict != VINF_SUCCESS)
7776 return rcStrict;
7777
7778 RTGCPHYS GCPhysMem;
7779 /** @todo access size */
7780 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7781 if (rcStrict != VINF_SUCCESS)
7782 return rcStrict;
7783
7784#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7785 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7786 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7787 {
7788 /*
7789 * MONITOR does not access the memory, just monitors the address. However,
7790 * if the address falls in the APIC-access page, the address monitored must
7791 * instead be the corresponding address in the virtual-APIC page.
7792 *
7793 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7794 */
7795 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7796 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7797 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7798 return rcStrict;
7799 }
7800#endif
7801
7802 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7803 {
7804 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7805 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7806 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7807 }
7808
7809 /*
7810 * Call EM to prepare the monitor/wait.
7811 */
7812 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7813 Assert(rcStrict == VINF_SUCCESS);
7814 if (rcStrict == VINF_SUCCESS)
7815 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7816 return rcStrict;
7817}
7818
7819
7820/**
7821 * Implements 'MWAIT'.
7822 */
7823IEM_CIMPL_DEF_0(iemCImpl_mwait)
7824{
7825 /*
7826 * Permission checks.
7827 */
7828 if (IEM_GET_CPL(pVCpu) != 0)
7829 {
7830 Log2(("mwait: CPL != 0\n"));
7831 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7832 * EFLAGS.VM then.) */
7833 return iemRaiseUndefinedOpcode(pVCpu);
7834 }
7835 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7836 {
7837 Log2(("mwait: Not in CPUID\n"));
7838 return iemRaiseUndefinedOpcode(pVCpu);
7839 }
7840
7841 /* Check VMX nested-guest intercept. */
7842 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7843 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7844 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7845
7846 /*
7847 * Gather the operands and validate them.
7848 */
7849 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7850 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7851 if (uEcx != 0)
7852 {
7853         /* The only supported extension is break on IRQ when IF=0. */
7854 if (uEcx > 1)
7855 {
7856 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7857 return iemRaiseGeneralProtectionFault0(pVCpu);
7858 }
7859 uint32_t fMWaitFeatures = 0;
7860 uint32_t uIgnore = 0;
7861 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7862 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7863 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7864 {
7865 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7866 return iemRaiseGeneralProtectionFault0(pVCpu);
7867 }
7868
7869#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7870 /*
7871         * If the interrupt-window exiting control is set or a virtual interrupt is pending
7872         * for delivery, and interrupts are disabled, the processor does not enter its
7873         * mwait state but rather passes control to the next instruction.
7874 *
7875 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7876 */
7877 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7878 && !pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7879 {
7880 if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7881 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7882                 /** @todo finish: check this out after we move the interrupt window stuff
7883                  *        out of the run loop and into the instruction finishing logic here. */
7884 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7885 }
7886#endif
7887 }
7888
7889 /*
7890 * Check SVM nested-guest mwait intercepts.
7891 */
7892 if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7893 && EMMonitorIsArmed(pVCpu))
7894 {
7895 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7896 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7897 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7898 }
7899 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7900 {
7901 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7902 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7903 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7904 }
7905
7906 /*
7907 * Call EM to prepare the monitor/wait.
7908 *
7909     * This will return VINF_EM_HALT. If the trap flag is set, we may
7910     * override it when executing iemRegAddToRipAndFinishingClearingRF, ASSUMING
7911     * that it will only return guest related events.
7912 */
7913 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7914
7915 /** @todo finish: This needs more thinking as we should suppress internal
7916 * debugger events here, or we'll bugger up the guest state even more than we
7917     * already do around VINF_EM_HALT. */
7918 VBOXSTRICTRC rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7919 if (rcStrict2 != VINF_SUCCESS)
7920 {
7921 Log2(("mwait: %Rrc (perform) -> %Rrc (finish)!\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2) ));
7922 rcStrict = rcStrict2;
7923 }
7924
7925 return rcStrict;
7926}
7927
7928
7929/**
7930 * Implements 'SWAPGS'.
7931 */
7932IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7933{
7934 Assert(IEM_IS_64BIT_CODE(pVCpu)); /* Caller checks this. */
7935
7936 /*
7937 * Permission checks.
7938 */
7939 if (IEM_GET_CPL(pVCpu) != 0)
7940 {
7941 Log2(("swapgs: CPL != 0\n"));
7942 return iemRaiseUndefinedOpcode(pVCpu);
7943 }
7944
7945 /*
7946 * Do the job.
7947 */
7948 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7949 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7950 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7951 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7952
7953 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7954}
7955
7956
7957#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7958/**
7959 * Handles a CPUID call.
7960 */
7961static VBOXSTRICTRC iemCpuIdVBoxCall(PVMCPUCC pVCpu, uint32_t iFunction,
7962 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
7963{
7964 switch (iFunction)
7965 {
7966 case VBOX_CPUID_FN_ID:
7967 LogFlow(("iemCpuIdVBoxCall: VBOX_CPUID_FN_ID\n"));
7968 *pEax = VBOX_CPUID_RESP_ID_EAX;
7969 *pEbx = VBOX_CPUID_RESP_ID_EBX;
7970 *pEcx = VBOX_CPUID_RESP_ID_ECX;
7971 *pEdx = VBOX_CPUID_RESP_ID_EDX;
7972 break;
7973
7974 case VBOX_CPUID_FN_LOG:
7975 {
7976 CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX | CPUMCTX_EXTRN_RSI
7977 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7978
7979 /* Validate input. */
7980 uint32_t cchToLog = *pEdx;
7981 if (cchToLog <= _2M)
7982 {
7983 uint32_t const uLogPicker = *pEbx;
7984 if (uLogPicker <= 1)
7985 {
7986 /* Resolve the logger. */
7987 PRTLOGGER const pLogger = !uLogPicker
7988 ? RTLogDefaultInstanceEx(UINT32_MAX) : RTLogRelGetDefaultInstanceEx(UINT32_MAX);
7989 if (pLogger)
7990 {
7991 /* Copy over the data: */
7992 RTGCPTR GCPtrSrc = pVCpu->cpum.GstCtx.rsi;
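                    /* The guest buffer is mapped and logged in chunks of at most 512 bytes,
                       never crossing a guest page boundary; RSI and the EDX count are
                       advanced after each chunk. */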
7993 while (cchToLog > 0)
7994 {
7995 uint32_t cbToMap = GUEST_PAGE_SIZE - (GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
7996 if (cbToMap > cchToLog)
7997 cbToMap = cchToLog;
7998                             /** @todo Extend iemMemMap to allow page-sized accesses and avoid the
7999                              *        7 unnecessary calls & iterations per page. */
8000 if (cbToMap > 512)
8001 cbToMap = 512;
8002 void *pvSrc = NULL;
8003 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, cbToMap, UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
8004 if (rcStrict == VINF_SUCCESS)
8005 {
8006 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
8007 rcStrict = iemMemCommitAndUnmap(pVCpu, pvSrc, IEM_ACCESS_DATA_R);
8008 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
8009 }
8010 else
8011 {
8012 Log(("iemCpuIdVBoxCall: %Rrc at %RGp LB %#x\n", VBOXSTRICTRC_VAL(rcStrict), GCPtrSrc, cbToMap));
8013 return rcStrict;
8014 }
8015
8016 /* Advance. */
8017 pVCpu->cpum.GstCtx.rsi = GCPtrSrc += cbToMap;
8018 *pEdx = cchToLog -= cbToMap;
8019 }
8020 *pEax = VINF_SUCCESS;
8021 }
8022 else
8023 *pEax = (uint32_t)VERR_NOT_FOUND;
8024 }
8025 else
8026 *pEax = (uint32_t)VERR_NOT_FOUND;
8027 }
8028 else
8029 *pEax = (uint32_t)VERR_TOO_MUCH_DATA;
8030 *pEdx = VBOX_CPUID_RESP_GEN_EDX;
8031 *pEcx = VBOX_CPUID_RESP_GEN_ECX;
8032 *pEbx = VBOX_CPUID_RESP_GEN_EBX;
8033 break;
8034 }
8035
8036 default:
8037 LogFlow(("iemCpuIdVBoxCall: Invalid function %#x (%#x, %#x)\n", iFunction, *pEbx, *pEdx));
8038 *pEax = (uint32_t)VERR_INVALID_FUNCTION;
8039 *pEbx = (uint32_t)VERR_INVALID_FUNCTION;
8040 *pEcx = (uint32_t)VERR_INVALID_FUNCTION;
8041 *pEdx = (uint32_t)VERR_INVALID_FUNCTION;
8042 break;
8043 }
8044 return VINF_SUCCESS;
8045}
8046#endif /* VBOX_WITHOUT_CPUID_HOST_CALL */
8047
8048/**
8049 * Implements 'CPUID'.
8050 */
8051IEM_CIMPL_DEF_0(iemCImpl_cpuid)
8052{
8053 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8054 {
8055 Log2(("cpuid: Guest intercept -> VM-exit\n"));
8056 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
8057 }
8058
8059 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
8060 {
8061 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
8062 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8063 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8064 }
8065
8066
8067 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
8068 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
8069
8070#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
8071 /*
8072 * CPUID host call backdoor.
8073 */
8074 if ( uEax == VBOX_CPUID_REQ_EAX_FIXED
8075 && (uEcx & VBOX_CPUID_REQ_ECX_FIXED_MASK) == VBOX_CPUID_REQ_ECX_FIXED
8076 && pVCpu->CTX_SUFF(pVM)->iem.s.fCpuIdHostCall)
8077 {
8078 VBOXSTRICTRC rcStrict = iemCpuIdVBoxCall(pVCpu, uEcx & VBOX_CPUID_REQ_ECX_FN_MASK,
8079 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx,
8080 &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8081 if (rcStrict != VINF_SUCCESS)
8082 return rcStrict;
8083 }
8084 /*
8085 * Regular CPUID.
8086 */
8087 else
8088#endif
8089 CPUMGetGuestCpuId(pVCpu, uEax, uEcx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
8090 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8091
8092 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
8093 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
8094 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
8095 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
8096 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
8097
8098 pVCpu->iem.s.cPotentialExits++;
8099 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8100}
8101
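/*
 * Guest-side usage sketch for the VBOX_CPUID_FN_LOG backdoor handled above.  This
 * is illustrative only: the helper name is made up, the exact ECX composition is
 * inferred from the dispatch check in iemCImpl_cpuid and the masking done in
 * iemCpuIdVBoxCall, and the backdoor only answers when the VM was started with
 * the CPUID host call enabled (fCpuIdHostCall).
 *
 *      static int32_t vboxGstCpuIdLog(const char *pszMsg, uint32_t cbMsg)
 *      {
 *          uint32_t    uEax   = VBOX_CPUID_REQ_EAX_FIXED;
 *          uint32_t    uEbx   = 0;                 // 0 = debug logger, 1 = release logger
 *          uint32_t    uEcx   = VBOX_CPUID_REQ_ECX_FIXED | VBOX_CPUID_FN_LOG;
 *          uint32_t    uEdx   = cbMsg;             // byte count, must not exceed 2 MiB
 *          const char *pszCur = pszMsg;            // rSI and eDX are modified by the host
 *          __asm__ __volatile__("cpuid"
 *                               : "+a" (uEax), "+b" (uEbx), "+c" (uEcx), "+d" (uEdx), "+S" (pszCur)
 *                               :
 *                               : "memory");
 *          return (int32_t)uEax;                   // VBox status code, VINF_SUCCESS on success
 *      }
 */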
8102
8103/**
8104 * Implements 'AAD'.
8105 *
8106 * @param bImm The immediate operand.
8107 */
8108IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
8109{
8110 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8111 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
8112 pVCpu->cpum.GstCtx.ax = al;
8113 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8114 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8115 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8116
8117 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8118}
8119
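/*
 * Worked example for reference: with AX=0x0104 (AH=1, AL=4) and the default
 * immediate 10 (0x0a), AAD computes AL = 4 + 1*10 = 14 = 0x0e and clears AH by
 * assigning the 8-bit result to AX, so AX becomes 0x000e; SF, ZF and PF are then
 * set from that AL value as done above.
 */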
8120
8121/**
8122 * Implements 'AAM'.
8123 *
8124 * @param bImm The immediate operand. Cannot be 0.
8125 */
8126IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
8127{
8128 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
8129
8130 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8131 uint8_t const al = (uint8_t)ax % bImm;
8132 uint8_t const ah = (uint8_t)ax / bImm;
8133 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
8134 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8135 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8136 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8137
8138 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8139}
8140
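/*
 * Worked example for reference: with AL=0x4b (75) and the default immediate 10
 * (0x0a), AAM yields AH = 75/10 = 7 and AL = 75%10 = 5, i.e. AX=0x0705; SF, ZF
 * and PF are then set from the new AL as done above.
 */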
8141
8142/**
8143 * Implements 'DAA'.
8144 */
8145IEM_CIMPL_DEF_0(iemCImpl_daa)
8146{
8147 uint8_t const al = pVCpu->cpum.GstCtx.al;
8148 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8149
8150 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8151 || (al & 0xf) >= 10)
8152 {
8153 pVCpu->cpum.GstCtx.al = al + 6;
8154 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8155 }
8156 else
8157 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8158
8159 if (al >= 0x9a || fCarry)
8160 {
8161 pVCpu->cpum.GstCtx.al += 0x60;
8162 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8163 }
8164 else
8165 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8166
8167 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8168 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8169}
8170
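/*
 * Worked example for reference: adding the packed BCD values 0x28 and 0x49 gives
 * the binary result AL=0x71 with AF set (carry out of the low nibble), so DAA
 * adds 6 and produces AL=0x77, the correct BCD sum of 28+49.  The +0x60 / CF=1
 * adjustment above only kicks in when the high nibble overflows as well
 * (original AL >= 0x9a or CF already set on entry).
 */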
8171
8172/**
8173 * Implements 'DAS'.
8174 */
8175IEM_CIMPL_DEF_0(iemCImpl_das)
8176{
8177 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
8178 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8179
8180 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8181 || (uInputAL & 0xf) >= 10)
8182 {
8183 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8184 if (uInputAL < 6)
8185 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8186 pVCpu->cpum.GstCtx.al = uInputAL - 6;
8187 }
8188 else
8189 {
8190 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8191 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8192 }
8193
8194 if (uInputAL >= 0x9a || fCarry)
8195 {
8196 pVCpu->cpum.GstCtx.al -= 0x60;
8197 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8198 }
8199
8200 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8201 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8202}
8203
8204
8205/**
8206 * Implements 'AAA'.
8207 */
8208IEM_CIMPL_DEF_0(iemCImpl_aaa)
8209{
8210 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8211 {
8212 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8213 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8214 {
8215 iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8216 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8217 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8218 }
8219 else
8220 {
8221 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8222 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8223 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8224 }
8225 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8226 }
8227 else
8228 {
8229 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8230 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8231 {
8232 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
8233 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8234 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8235 }
8236 else
8237 {
8238 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8239 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8240 }
8241 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8242 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8243 }
8244
8245 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8246}
8247
8248
8249/**
8250 * Implements 'AAS'.
8251 */
8252IEM_CIMPL_DEF_0(iemCImpl_aas)
8253{
8254 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8255 {
8256 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8257 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8258 {
8259 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8260 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8261 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8262 }
8263 else
8264 {
8265 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8266 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8267 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8268 }
8269 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8270 }
8271 else
8272 {
8273 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8274 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8275 {
8276 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
8277 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8278 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8279 }
8280 else
8281 {
8282 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8283 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8284 }
8285 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8286 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8287 }
8288
8289 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8290}
8291
8292
8293/**
8294 * Implements the 16-bit version of 'BOUND'.
8295 *
8296 * @note We have separate 16-bit and 32-bit variants of this function due to
8297 * the decoder using unsigned parameters, whereas we want signed ones to
8298 * do the job. This is significant for a recompiler.
8299 */
8300IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
8301{
8302 /*
8303 * Check if the index is inside the bounds, otherwise raise #BR.
8304 */
8305 if ( idxArray >= idxLowerBound
8306 && idxArray <= idxUpperBound)
8307 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8308 return iemRaiseBoundRangeExceeded(pVCpu);
8309}
8310
8311
8312/**
8313 * Implements the 32-bit version of 'BOUND'.
8314 */
8315IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8316{
8317 /*
8318 * Check if the index is inside the bounds, otherwise raise #BR.
8319 */
8320 if ( idxArray >= idxLowerBound
8321 && idxArray <= idxUpperBound)
8322 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8323 return iemRaiseBoundRangeExceeded(pVCpu);
8324}
8325
8326
8327
8328/*
8329 * Instantiate the various string operation combinations.
8330 */
8331#define OP_SIZE 8
8332#define ADDR_SIZE 16
8333#include "IEMAllCImplStrInstr.cpp.h"
8334#define OP_SIZE 8
8335#define ADDR_SIZE 32
8336#include "IEMAllCImplStrInstr.cpp.h"
8337#define OP_SIZE 8
8338#define ADDR_SIZE 64
8339#include "IEMAllCImplStrInstr.cpp.h"
8340
8341#define OP_SIZE 16
8342#define ADDR_SIZE 16
8343#include "IEMAllCImplStrInstr.cpp.h"
8344#define OP_SIZE 16
8345#define ADDR_SIZE 32
8346#include "IEMAllCImplStrInstr.cpp.h"
8347#define OP_SIZE 16
8348#define ADDR_SIZE 64
8349#include "IEMAllCImplStrInstr.cpp.h"
8350
8351#define OP_SIZE 32
8352#define ADDR_SIZE 16
8353#include "IEMAllCImplStrInstr.cpp.h"
8354#define OP_SIZE 32
8355#define ADDR_SIZE 32
8356#include "IEMAllCImplStrInstr.cpp.h"
8357#define OP_SIZE 32
8358#define ADDR_SIZE 64
8359#include "IEMAllCImplStrInstr.cpp.h"
8360
8361#define OP_SIZE 64
8362#define ADDR_SIZE 32
8363#include "IEMAllCImplStrInstr.cpp.h"
8364#define OP_SIZE 64
8365#define ADDR_SIZE 64
8366#include "IEMAllCImplStrInstr.cpp.h"
8367
8368
8369/**
8370 * Implements 'XGETBV'.
8371 */
8372IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8373{
8374 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8375 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8376 {
8377 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8378 switch (uEcx)
8379 {
8380 case 0:
8381 break;
8382
8383 case 1: /** @todo Implement XCR1 support. */
8384 default:
8385 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8386 return iemRaiseGeneralProtectionFault0(pVCpu);
8387
8388 }
8389 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8390 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8391 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8392
8393 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8394 }
8395 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8396 return iemRaiseUndefinedOpcode(pVCpu);
8397}
8398
8399
8400/**
8401 * Implements 'XSETBV'.
8402 */
8403IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8404{
8405 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8406 {
8407 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8408 {
8409 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8410 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8411 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8412 }
8413
8414 if (IEM_GET_CPL(pVCpu) == 0)
8415 {
8416 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8417
8418 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8419 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8420
8421 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8422 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8423 switch (uEcx)
8424 {
8425 case 0:
8426 {
8427 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8428 if (rc == VINF_SUCCESS)
8429 break;
8430 Assert(rc == VERR_CPUM_RAISE_GP_0);
8431 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8432 return iemRaiseGeneralProtectionFault0(pVCpu);
8433 }
8434
8435 case 1: /** @todo Implement XCR1 support. */
8436 default:
8437 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8438 return iemRaiseGeneralProtectionFault0(pVCpu);
8439
8440 }
8441
8442 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8443 }
8444
8445 Log(("xsetbv cpl=%u -> GP(0)\n", IEM_GET_CPL(pVCpu)));
8446 return iemRaiseGeneralProtectionFault0(pVCpu);
8447 }
8448 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8449 return iemRaiseUndefinedOpcode(pVCpu);
8450}
8451
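/*
 * Guest-side sketch of the XGETBV usage emulated above (illustrative only; GCC
 * and clang may require -mxsave or an equivalent target attribute for the
 * intrinsic, and the instruction #UDs when CR4.OSXSAVE is clear, exactly as
 * implemented in iemCImpl_xgetbv):
 *
 *      #include <immintrin.h>
 *      // XCR0 is selected with ECX=0; bits 1 (SSE) and 2 (YMM) must both be set
 *      // before an application may rely on AVX state being saved and restored.
 *      int fOsSavesAvxState = (_xgetbv(0) & 0x6) == 0x6;
 */
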
8452#ifndef RT_ARCH_ARM64
8453# ifdef IN_RING3
8454
8455/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8456struct IEMCIMPLCX16ARGS
8457{
8458 PRTUINT128U pu128Dst;
8459 PRTUINT128U pu128RaxRdx;
8460 PRTUINT128U pu128RbxRcx;
8461 uint32_t *pEFlags;
8462# ifdef VBOX_STRICT
8463 uint32_t cCalls;
8464# endif
8465};
8466
8467/**
8468 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8469 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8470 */
8471static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8472{
8473 RT_NOREF(pVM, pVCpu);
8474 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8475# ifdef VBOX_STRICT
8476 Assert(pArgs->cCalls == 0);
8477 pArgs->cCalls++;
8478# endif
8479
8480 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8481 return VINF_SUCCESS;
8482}
8483
8484# endif /* IN_RING3 */
8485
8486/**
8487 * Implements 'CMPXCHG16B' fallback using rendezvous.
8488 */
8489IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8490 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
8491{
8492# ifdef IN_RING3
8493 struct IEMCIMPLCX16ARGS Args;
8494 Args.pu128Dst = pu128Dst;
8495 Args.pu128RaxRdx = pu128RaxRdx;
8496 Args.pu128RbxRcx = pu128RbxRcx;
8497 Args.pEFlags = pEFlags;
8498# ifdef VBOX_STRICT
8499 Args.cCalls = 0;
8500# endif
8501 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8502 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8503 Assert(Args.cCalls == 1);
8504 if (rcStrict == VINF_SUCCESS)
8505 {
8506 /* Duplicated tail code. */
8507 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
8508 if (rcStrict == VINF_SUCCESS)
8509 {
8510 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8511 if (!(*pEFlags & X86_EFL_ZF))
8512 {
8513 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8514 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8515 }
8516 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8517 }
8518 }
8519 return rcStrict;
8520# else
8521 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
8522 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8523# endif
8524}
8525
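/*
 * For reference, the architectural semantics the fallback above must preserve:
 * CMPXCHG16B compares RDX:RAX with the 16-byte memory operand; if they are
 * equal, ZF is set and RCX:RBX is stored to memory, otherwise ZF is cleared and
 * the memory value is loaded into RDX:RAX.  That is why the tail code only
 * writes RAX/RDX back when ZF ends up clear.
 */
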
8526#endif /* RT_ARCH_ARM64 */
8527
8528/**
8529 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8530 *
8531 * This is implemented in C because it triggers a load-like behaviour without
8532 * actually reading anything. Since that's not so common, it's implemented
8533 * here.
8534 *
8535 * @param iEffSeg The effective segment.
8536 * @param GCPtrEff The address of the image.
8537 */
8538IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8539{
8540 /*
8541 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8542 */
8543 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8544 if (rcStrict == VINF_SUCCESS)
8545 {
8546 RTGCPHYS GCPhysMem;
8547 /** @todo access size. */
8548 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8549 if (rcStrict == VINF_SUCCESS)
8550 {
8551#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8552 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8553 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8554 {
8555 /*
8556 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8557 * that contains the address. However, if the address falls in the APIC-access
8558 * page, the address flushed must instead be the corresponding address in the
8559 * virtual-APIC page.
8560 *
8561 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8562 */
8563 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8564 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8565 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8566 return rcStrict;
8567 }
8568#endif
8569 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8570 }
8571 }
8572
8573 return rcStrict;
8574}
8575
8576
8577/**
8578 * Implements 'FINIT' and 'FNINIT'.
8579 *
8580 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
8581 * not.
8582 */
8583IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8584{
8585 /*
8586 * Exceptions.
8587 */
8588 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8589 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8590 return iemRaiseDeviceNotAvailable(pVCpu);
8591
8592 iemFpuActualizeStateForChange(pVCpu);
8593 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8594
8595 /* FINIT: Raise #MF on pending exception(s): */
8596 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8597 return iemRaiseMathFault(pVCpu);
8598
8599 /*
8600 * Reset the state.
8601 */
8602 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8603
8604 /* Rotate the stack to account for changed TOS. */
8605 iemFpuRotateStackSetTop(&pXState->x87, 0);
8606
8607 pXState->x87.FCW = 0x37f;
8608 pXState->x87.FSW = 0;
8609 pXState->x87.FTW = 0x00; /* 0 - empty. */
8610 /** @todo Intel says the instruction and data pointers are not cleared on
8611 * 387, presume that the 8087 and 287 don't do so either. */
8612 /** @todo test this stuff. */
8613 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8614 {
8615 pXState->x87.FPUDP = 0;
8616 pXState->x87.DS = 0; //??
8617 pXState->x87.Rsrvd2 = 0;
8618 pXState->x87.FPUIP = 0;
8619 pXState->x87.CS = 0; //??
8620 pXState->x87.Rsrvd1 = 0;
8621 }
8622 pXState->x87.FOP = 0;
8623
8624 iemHlpUsedFpu(pVCpu);
8625 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8626}
8627
8628
8629/**
8630 * Implements 'FXSAVE'.
8631 *
8632 * @param iEffSeg The effective segment.
8633 * @param GCPtrEff The address of the image.
8634 * @param enmEffOpSize The operand size (only REX.W really matters).
8635 */
8636IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8637{
8638 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8639
8640 /** @todo check out bugref{1529} and AMD behaviour */
8641
8642 /*
8643 * Raise exceptions.
8644 */
8645 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8646 return iemRaiseDeviceNotAvailable(pVCpu);
8647
8648 /*
8649 * Access the memory.
8650 */
8651 void *pvMem512;
8652 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8653 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8654 if (rcStrict != VINF_SUCCESS)
8655 return rcStrict;
8656 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8657 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8658
8659 /*
8660 * Store the registers.
8661 */
8662 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
8663 * specific whether MXCSR and XMM0-XMM7 are saved. */
8664
8665 /* common for all formats */
8666 pDst->FCW = pSrc->FCW;
8667 pDst->FSW = pSrc->FSW;
8668 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8669 pDst->FOP = pSrc->FOP;
8670 pDst->MXCSR = pSrc->MXCSR;
8671 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8672 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8673 {
8674 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8675 * them for now... */
8676 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8677 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8678 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8679 pDst->aRegs[i].au32[3] = 0;
8680 }
8681
8682 /* FPU IP, CS, DP and DS. */
8683 pDst->FPUIP = pSrc->FPUIP;
8684 pDst->CS = pSrc->CS;
8685 pDst->FPUDP = pSrc->FPUDP;
8686 pDst->DS = pSrc->DS;
8687 if (enmEffOpSize == IEMMODE_64BIT)
8688 {
8689 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8690 pDst->Rsrvd1 = pSrc->Rsrvd1;
8691 pDst->Rsrvd2 = pSrc->Rsrvd2;
8692 }
8693 else
8694 {
8695 pDst->Rsrvd1 = 0;
8696 pDst->Rsrvd2 = 0;
8697 }
8698
8699 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set. */
8700 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8701 || !IEM_IS_64BIT_CODE(pVCpu)
8702 || IEM_GET_CPL(pVCpu) != 0)
8703 {
8704 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8705 for (uint32_t i = 0; i < cXmmRegs; i++)
8706 pDst->aXMM[i] = pSrc->aXMM[i];
8707 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8708 * right? */
8709 }
8710
8711 /*
8712 * Commit the memory.
8713 */
8714 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8715 if (rcStrict != VINF_SUCCESS)
8716 return rcStrict;
8717
8718 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8719}
8720
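/*
 * For reference, the 512-byte FXSAVE image filled in above (offsets per the
 * Intel SDM; with REX.W the IP/CS and DP/DS pairs become full 64-bit FPU IP/DP
 * fields, which is what the Rsrvd1/Rsrvd2 handling covers):
 *      0x000  FCW, FSW, abridged FTW, reserved byte, FOP
 *      0x008  FPU IP (32-bit), CS, Rsrvd1
 *      0x010  FPU DP (32-bit), DS, Rsrvd2
 *      0x018  MXCSR, MXCSR_MASK
 *      0x020  ST0..ST7 / MM0..MM7, 16 bytes each
 *      0x0a0  XMM0..XMM15, 16 bytes each (only XMM0..XMM7 outside 64-bit mode)
 *      0x1a0  reserved / available to software
 */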
8721
8722/**
8723 * Implements 'FXRSTOR'.
8724 *
8725 * @param iEffSeg The effective segment register for @a GCPtrEff.
8726 * @param GCPtrEff The address of the image.
8727 * @param enmEffOpSize The operand size (only REX.W really matters).
8728 */
8729IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8730{
8731 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8732
8733 /** @todo check out bugref{1529} and AMD behaviour */
8734
8735 /*
8736 * Raise exceptions.
8737 */
8738 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8739 return iemRaiseDeviceNotAvailable(pVCpu);
8740
8741 /*
8742 * Access the memory.
8743 */
8744 void *pvMem512;
8745 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8746 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8747 if (rcStrict != VINF_SUCCESS)
8748 return rcStrict;
8749 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8750 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8751
8752 /*
8753 * Check the state for stuff which will #GP(0).
8754 */
8755 uint32_t const fMXCSR = pSrc->MXCSR;
8756 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8757 if (fMXCSR & ~fMXCSR_MASK)
8758 {
8759 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8760 return iemRaiseGeneralProtectionFault0(pVCpu);
8761 }
8762
8763 /*
8764 * Load the registers.
8765 */
8766 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
8767 * specific whether MXCSR and XMM0-XMM7 are
8768 * restored according to Intel.
8769 * AMD says MXCSR and XMM registers are never loaded if
8770 * CR4.OSFXSR=0.
8771 */
8772
8773 /* common for all formats */
8774 pDst->FCW = pSrc->FCW;
8775 pDst->FSW = pSrc->FSW;
8776 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8777 pDst->FOP = pSrc->FOP;
8778 pDst->MXCSR = fMXCSR;
8779 /* (MXCSR_MASK is read-only) */
8780 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8781 {
8782 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8783 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8784 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8785 pDst->aRegs[i].au32[3] = 0;
8786 }
8787
8788 /* FPU IP, CS, DP and DS. */
8789 /** @todo AMD says this is only done if FSW.ES is set after loading. */
8790 if (enmEffOpSize == IEMMODE_64BIT)
8791 {
8792 pDst->FPUIP = pSrc->FPUIP;
8793 pDst->CS = pSrc->CS;
8794 pDst->Rsrvd1 = pSrc->Rsrvd1;
8795 pDst->FPUDP = pSrc->FPUDP;
8796 pDst->DS = pSrc->DS;
8797 pDst->Rsrvd2 = pSrc->Rsrvd2;
8798 }
8799 else
8800 {
8801 pDst->FPUIP = pSrc->FPUIP;
8802 pDst->CS = pSrc->CS;
8803 pDst->Rsrvd1 = 0;
8804 pDst->FPUDP = pSrc->FPUDP;
8805 pDst->DS = pSrc->DS;
8806 pDst->Rsrvd2 = 0;
8807 }
8808
8809 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set.
8810 * Does not affect MXCSR, only registers.
8811 */
8812 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8813 || !IEM_IS_64BIT_CODE(pVCpu)
8814 || IEM_GET_CPL(pVCpu) != 0)
8815 {
8816 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8817 for (uint32_t i = 0; i < cXmmRegs; i++)
8818 pDst->aXMM[i] = pSrc->aXMM[i];
8819 }
8820
8821 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
8822 iemFpuRecalcExceptionStatus(pDst);
8823
8824 if (pDst->FSW & X86_FSW_ES)
8825 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8826 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8827
8828 /*
8829 * Unmap the memory.
8830 */
8831 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8832 if (rcStrict != VINF_SUCCESS)
8833 return rcStrict;
8834
8835 iemHlpUsedFpu(pVCpu);
8836 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8837}
8838
8839
8840/**
8841 * Implements 'XSAVE'.
8842 *
8843 * @param iEffSeg The effective segment.
8844 * @param GCPtrEff The address of the image.
8845 * @param enmEffOpSize The operand size (only REX.W really matters).
8846 */
8847IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8848{
8849 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8850
8851 /*
8852 * Raise exceptions.
8853 */
8854 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8855 return iemRaiseUndefinedOpcode(pVCpu);
8856 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8857 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8858 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8859 {
8860 Log(("xsave: Not enabled for nested-guest execution -> #UD\n"));
8861 return iemRaiseUndefinedOpcode(pVCpu);
8862 }
8863 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8864 return iemRaiseDeviceNotAvailable(pVCpu);
8865
8866 /*
8867 * Calc the requested mask.
8868 */
8869 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8870 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8871 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8872
8873/** @todo figure out the exact protocol for the memory access. Currently we
8874 * just need this crap to work halfway to make it possible to test
8875 * AVX instructions. */
8876/** @todo figure out the XINUSE and XMODIFIED */
8877
8878 /*
8879 * Access the x87 memory state.
8880 */
8881 /* The x87+SSE state. */
8882 void *pvMem512;
8883 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8884 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8885 if (rcStrict != VINF_SUCCESS)
8886 return rcStrict;
8887 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8888 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8889
8890 /* The header. */
8891 PX86XSAVEHDR pHdr;
8892 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
8893 if (rcStrict != VINF_SUCCESS)
8894 return rcStrict;
8895
8896 /*
8897 * Store the X87 state.
8898 */
8899 if (fReqComponents & XSAVE_C_X87)
8900 {
8901 /* common for all formats */
8902 pDst->FCW = pSrc->FCW;
8903 pDst->FSW = pSrc->FSW;
8904 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8905 pDst->FOP = pSrc->FOP;
8906 pDst->FPUIP = pSrc->FPUIP;
8907 pDst->CS = pSrc->CS;
8908 pDst->FPUDP = pSrc->FPUDP;
8909 pDst->DS = pSrc->DS;
8910 if (enmEffOpSize == IEMMODE_64BIT)
8911 {
8912 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8913 pDst->Rsrvd1 = pSrc->Rsrvd1;
8914 pDst->Rsrvd2 = pSrc->Rsrvd2;
8915 }
8916 else
8917 {
8918 pDst->Rsrvd1 = 0;
8919 pDst->Rsrvd2 = 0;
8920 }
8921 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8922 {
8923 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8924 * them for now... */
8925 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8926 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8927 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8928 pDst->aRegs[i].au32[3] = 0;
8929 }
8930
8931 }
8932
8933 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8934 {
8935 pDst->MXCSR = pSrc->MXCSR;
8936 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8937 }
8938
8939 if (fReqComponents & XSAVE_C_SSE)
8940 {
8941 /* XMM registers. */
8942 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8943 for (uint32_t i = 0; i < cXmmRegs; i++)
8944 pDst->aXMM[i] = pSrc->aXMM[i];
8945 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8946 * right? */
8947 }
8948
8949 /* Commit the x87 state bits. (probably wrong) */
8950 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8951 if (rcStrict != VINF_SUCCESS)
8952 return rcStrict;
8953
8954 /*
8955 * Store AVX state.
8956 */
8957 if (fReqComponents & XSAVE_C_YMM)
8958 {
8959 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8960 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8961 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8962 PX86XSAVEYMMHI pCompDst;
8963 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8964 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
8965 if (rcStrict != VINF_SUCCESS)
8966 return rcStrict;
8967
8968 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8969 for (uint32_t i = 0; i < cXmmRegs; i++)
8970 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8971
8972 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8973 if (rcStrict != VINF_SUCCESS)
8974 return rcStrict;
8975 }
8976
8977 /*
8978 * Update the header.
8979 */
8980 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8981 | (fReqComponents & fXInUse);
8982
8983 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
8984 if (rcStrict != VINF_SUCCESS)
8985 return rcStrict;
8986
8987 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8988}
8989
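/*
 * Worked example for the requested-feature mask above: XSAVE stores the
 * intersection of the EDX:EAX instruction mask and XCR0.  With XCR0=0x7
 * (x87+SSE+YMM enabled) and EDX and EAX both 0xffffffff, fReqComponents is 0x7
 * and all three components are written; with EDX:EAX=0x2 only the SSE component
 * (XMM registers plus MXCSR/MXCSR_MASK) would be stored.
 */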
8990
8991/**
8992 * Implements 'XRSTOR'.
8993 *
8994 * @param iEffSeg The effective segment.
8995 * @param GCPtrEff The address of the image.
8996 * @param enmEffOpSize The operand size (only REX.W really matters).
8997 */
8998IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8999{
9000 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
9001
9002 /*
9003 * Raise exceptions.
9004 */
9005 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9006 return iemRaiseUndefinedOpcode(pVCpu);
9007 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
9008 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9009 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
9010 {
9011 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
9012 return iemRaiseUndefinedOpcode(pVCpu);
9013 }
9014 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
9015 return iemRaiseDeviceNotAvailable(pVCpu);
9016 if (GCPtrEff & 63)
9017 {
9018 /** @todo CPU/VM detection possible! \#AC might not be signalled for
9019 * all/any misalignment sizes; Intel says it's an implementation detail. */
9020 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
9021 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
9022 && IEM_GET_CPL(pVCpu) == 3)
9023 return iemRaiseAlignmentCheckException(pVCpu);
9024 return iemRaiseGeneralProtectionFault0(pVCpu);
9025 }
9026
9027/** @todo figure out the exact protocol for the memory access. Currently we
9028 * just need this crap to work halfway to make it possible to test
9029 * AVX instructions. */
9030/** @todo figure out the XINUSE and XMODIFIED */
9031
9032 /*
9033 * Access the x87 memory state.
9034 */
9035 /* The x87+SSE state. */
9036 void *pvMem512;
9037 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
9038 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
9039 if (rcStrict != VINF_SUCCESS)
9040 return rcStrict;
9041 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
9042 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
9043
9044 /*
9045 * Calc the requested mask
9046 */
9047 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
9048 PCX86XSAVEHDR pHdrSrc;
9049 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512,
9050 IEM_ACCESS_DATA_R, 0 /* checked above */);
9051 if (rcStrict != VINF_SUCCESS)
9052 return rcStrict;
9053
9054 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
9055 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9056 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
9057 uint64_t const fRstorMask = pHdrSrc->bmXState;
9058 uint64_t const fCompMask = pHdrSrc->bmXComp;
9059
9060 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9061
9062 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
9063
9064 /* We won't need this any longer. */
9065 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
9066 if (rcStrict != VINF_SUCCESS)
9067 return rcStrict;
9068
9069 /*
9070 * Load the X87 state.
9071 */
9072 if (fReqComponents & XSAVE_C_X87)
9073 {
9074 if (fRstorMask & XSAVE_C_X87)
9075 {
9076 pDst->FCW = pSrc->FCW;
9077 pDst->FSW = pSrc->FSW;
9078 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
9079 pDst->FOP = pSrc->FOP;
9080 pDst->FPUIP = pSrc->FPUIP;
9081 pDst->CS = pSrc->CS;
9082 pDst->FPUDP = pSrc->FPUDP;
9083 pDst->DS = pSrc->DS;
9084 if (enmEffOpSize == IEMMODE_64BIT)
9085 {
9086 /* Load upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
9087 pDst->Rsrvd1 = pSrc->Rsrvd1;
9088 pDst->Rsrvd2 = pSrc->Rsrvd2;
9089 }
9090 else
9091 {
9092 pDst->Rsrvd1 = 0;
9093 pDst->Rsrvd2 = 0;
9094 }
9095 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
9096 {
9097 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
9098 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
9099 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
9100 pDst->aRegs[i].au32[3] = 0;
9101 }
9102
9103 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9104 iemFpuRecalcExceptionStatus(pDst);
9105
9106 if (pDst->FSW & X86_FSW_ES)
9107 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
9108 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
9109 }
9110 else
9111 {
9112 pDst->FCW = 0x37f;
9113 pDst->FSW = 0;
9114 pDst->FTW = 0x00; /* 0 - empty. */
9115 pDst->FPUDP = 0;
9116 pDst->DS = 0; //??
9117 pDst->Rsrvd2= 0;
9118 pDst->FPUIP = 0;
9119 pDst->CS = 0; //??
9120 pDst->Rsrvd1= 0;
9121 pDst->FOP = 0;
9122 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
9123 {
9124 pDst->aRegs[i].au32[0] = 0;
9125 pDst->aRegs[i].au32[1] = 0;
9126 pDst->aRegs[i].au32[2] = 0;
9127 pDst->aRegs[i].au32[3] = 0;
9128 }
9129 }
9130 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
9131 }
9132
9133 /* MXCSR */
9134 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
9135 {
9136 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
9137 pDst->MXCSR = pSrc->MXCSR;
9138 else
9139 pDst->MXCSR = 0x1f80;
9140 }
9141
9142 /* XMM registers. */
9143 if (fReqComponents & XSAVE_C_SSE)
9144 {
9145 if (fRstorMask & XSAVE_C_SSE)
9146 {
9147 for (uint32_t i = 0; i < cXmmRegs; i++)
9148 pDst->aXMM[i] = pSrc->aXMM[i];
9149 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
9150 * right? */
9151 }
9152 else
9153 {
9154 for (uint32_t i = 0; i < cXmmRegs; i++)
9155 {
9156 pDst->aXMM[i].au64[0] = 0;
9157 pDst->aXMM[i].au64[1] = 0;
9158 }
9159 }
9160 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
9161 }
9162
9163 /* Unmap the x87 state bits (so we don't run out of mappings). */
9164 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
9165 if (rcStrict != VINF_SUCCESS)
9166 return rcStrict;
9167
9168 /*
9169 * Restore AVX state.
9170 */
9171 if (fReqComponents & XSAVE_C_YMM)
9172 {
9173 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
9174 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
9175
9176 if (fRstorMask & XSAVE_C_YMM)
9177 {
9178 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
9179 PCX86XSAVEYMMHI pCompSrc;
9180 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
9181 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
9182 IEM_ACCESS_DATA_R, 0 /* checked above */);
9183 if (rcStrict != VINF_SUCCESS)
9184 return rcStrict;
9185
9186 for (uint32_t i = 0; i < cXmmRegs; i++)
9187 {
9188 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
9189 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
9190 }
9191
9192 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
9193 if (rcStrict != VINF_SUCCESS)
9194 return rcStrict;
9195 }
9196 else
9197 {
9198 for (uint32_t i = 0; i < cXmmRegs; i++)
9199 {
9200 pCompDst->aYmmHi[i].au64[0] = 0;
9201 pCompDst->aYmmHi[i].au64[1] = 0;
9202 }
9203 }
9204 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
9205 }
9206
9207 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9208}
9209
9210
9211
9212
9213/**
9214 * Implements 'STMXCSR'.
9215 *
9216 * @param iEffSeg The effective segment register for @a GCPtrEff.
9217 * @param GCPtrEff The address of the image.
9218 */
9219IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9220{
9221 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9222
9223 /*
9224 * Raise exceptions.
9225 */
9226 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9227 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9228 {
9229 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9230 {
9231 /*
9232 * Do the job.
9233 */
9234 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9235 if (rcStrict == VINF_SUCCESS)
9236 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9237 return rcStrict;
9238 }
9239 return iemRaiseDeviceNotAvailable(pVCpu);
9240 }
9241 return iemRaiseUndefinedOpcode(pVCpu);
9242}
9243
9244
9245/**
9246 * Implements 'VSTMXCSR'.
9247 *
9248 * @param iEffSeg The effective segment register for @a GCPtrEff.
9249 * @param GCPtrEff The address of the image.
9250 */
9251IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9252{
9253 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9254
9255 /*
9256 * Raise exceptions.
9257 */
9258 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9259 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9260 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
9261 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9262 {
9263 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9264 {
9265 /*
9266 * Do the job.
9267 */
9268 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9269 if (rcStrict == VINF_SUCCESS)
9270 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9271 return rcStrict;
9272 }
9273 return iemRaiseDeviceNotAvailable(pVCpu);
9274 }
9275 return iemRaiseUndefinedOpcode(pVCpu);
9276}
9277
9278
9279/**
9280 * Implements 'LDMXCSR'.
9281 *
9282 * @param iEffSeg The effective segment register for @a GCPtrEff.
9283 * @param GCPtrEff The address of the image.
9284 */
9285IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9286{
9287 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9288
9289 /*
9290 * Raise exceptions.
9291 */
9292 /** @todo testcase - order of LDMXCSR faults. Do \#PF, \#GP and \#SS
9293 * happen after or before \#UD and \#NM? */
9294 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9295 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9296 {
9297 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9298 {
9299 /*
9300 * Do the job.
9301 */
9302 uint32_t fNewMxCsr;
9303 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9304 if (rcStrict == VINF_SUCCESS)
9305 {
9306 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9307 if (!(fNewMxCsr & ~fMxCsrMask))
9308 {
9309 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9310 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9311 }
9312 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9313 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9314 return iemRaiseGeneralProtectionFault0(pVCpu);
9315 }
9316 return rcStrict;
9317 }
9318 return iemRaiseDeviceNotAvailable(pVCpu);
9319 }
9320 return iemRaiseUndefinedOpcode(pVCpu);
9321}
9322
9323
9324/**
9325 * Common routine for fnstenv and fnsave.
9326 *
9327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9328 * @param enmEffOpSize The effective operand size.
9329 * @param uPtr Where to store the state.
9330 */
9331static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9332{
9333 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9334 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
9335 if (enmEffOpSize == IEMMODE_16BIT)
9336 {
9337 uPtr.pu16[0] = pSrcX87->FCW;
9338 uPtr.pu16[1] = pSrcX87->FSW;
9339 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9340 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9341 {
9342 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9343 * protected mode or long mode and we save it in real mode? And vice
9344 * versa? And with 32-bit operand size? I think the CPU is storing the
9345 * effective address ((CS << 4) + IP) in the offset register and not
9346 * doing any address calculations here. */
9347 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9348 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9349 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9350 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9351 }
9352 else
9353 {
9354 uPtr.pu16[3] = pSrcX87->FPUIP;
9355 uPtr.pu16[4] = pSrcX87->CS;
9356 uPtr.pu16[5] = pSrcX87->FPUDP;
9357 uPtr.pu16[6] = pSrcX87->DS;
9358 }
9359 }
9360 else
9361 {
9362 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9363 uPtr.pu16[0*2] = pSrcX87->FCW;
9364 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9365 uPtr.pu16[1*2] = pSrcX87->FSW;
9366 uPtr.pu16[1*2+1] = 0xffff;
9367 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9368 uPtr.pu16[2*2+1] = 0xffff;
9369 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9370 {
9371 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9372 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9373 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9374 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9375 }
9376 else
9377 {
9378 uPtr.pu32[3] = pSrcX87->FPUIP;
9379 uPtr.pu16[4*2] = pSrcX87->CS;
9380 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9381 uPtr.pu32[5] = pSrcX87->FPUDP;
9382 uPtr.pu16[6*2] = pSrcX87->DS;
9383 uPtr.pu16[6*2+1] = 0xffff;
9384 }
9385 }
9386}
9387
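/*
 * Worked example of the real/V86-mode encoding above, assuming the CPU really
 * stores the linear address (CS << 4) + IP in the offset field: with CS=0x1234
 * and IP=0x5678 the linear FPUIP is 0x179b8.  The 16-bit environment then gets
 * 0x79b8 in word 3 and ((0x179b8 >> 4) & 0xf000) = 0x1000 merged with FOP in
 * word 4; the 32-bit environment stores the same low word and puts
 * ((0x179b8 & 0xffff0000) >> 4) = 0x1000 together with FOP in dword 4.
 */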
9388
9389/**
9390 * Common routine for fldenv and frstor.
9391 *
9392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9393 * @param enmEffOpSize The effective operand size.
9394 * @param uPtr Where to load the state from.
9395 */
9396static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9397{
9398 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9399 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9400 if (enmEffOpSize == IEMMODE_16BIT)
9401 {
9402 pDstX87->FCW = uPtr.pu16[0];
9403 pDstX87->FSW = uPtr.pu16[1];
9404 pDstX87->FTW = uPtr.pu16[2];
9405 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9406 {
9407 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9408 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9409 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9410 pDstX87->CS = 0;
9411 pDstX87->Rsrvd1= 0;
9412 pDstX87->DS = 0;
9413 pDstX87->Rsrvd2= 0;
9414 }
9415 else
9416 {
9417 pDstX87->FPUIP = uPtr.pu16[3];
9418 pDstX87->CS = uPtr.pu16[4];
9419 pDstX87->Rsrvd1= 0;
9420 pDstX87->FPUDP = uPtr.pu16[5];
9421 pDstX87->DS = uPtr.pu16[6];
9422 pDstX87->Rsrvd2= 0;
9423 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9424 }
9425 }
9426 else
9427 {
9428 pDstX87->FCW = uPtr.pu16[0*2];
9429 pDstX87->FSW = uPtr.pu16[1*2];
9430 pDstX87->FTW = uPtr.pu16[2*2];
9431 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9432 {
9433 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9434 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9435 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9436 pDstX87->CS = 0;
9437 pDstX87->Rsrvd1= 0;
9438 pDstX87->DS = 0;
9439 pDstX87->Rsrvd2= 0;
9440 }
9441 else
9442 {
9443 pDstX87->FPUIP = uPtr.pu32[3];
9444 pDstX87->CS = uPtr.pu16[4*2];
9445 pDstX87->Rsrvd1= 0;
9446 pDstX87->FOP = uPtr.pu16[4*2+1];
9447 pDstX87->FPUDP = uPtr.pu32[5];
9448 pDstX87->DS = uPtr.pu16[6*2];
9449 pDstX87->Rsrvd2= 0;
9450 }
9451 }
9452
9453 /* Make adjustments. */
9454 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9455#ifdef LOG_ENABLED
9456 uint16_t const fOldFsw = pDstX87->FSW;
9457#endif
9458 pDstX87->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9459 iemFpuRecalcExceptionStatus(pDstX87);
9460#ifdef LOG_ENABLED
9461 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9462 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9463 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised",
9464 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9465#endif
9466
9467 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9468 * exceptions are pending after loading the saved state? */
9469}
9470
9471
9472/**
9473 * Implements 'FNSTENV'.
9474 *
9475 * @param enmEffOpSize The operand size (only REX.W really matters).
9476 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9477 * @param GCPtrEffDst The address of the image.
9478 */
9479IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9480{
9481 RTPTRUNION uPtr;
9482 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9483 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9484 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9485 if (rcStrict != VINF_SUCCESS)
9486 return rcStrict;
9487
9488 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9489
9490 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9491 if (rcStrict != VINF_SUCCESS)
9492 return rcStrict;
9493
9494 /* Mask all math exceptions. Any possibly pending exceptions will be cleared. */
9495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9496 pFpuCtx->FCW |= X86_FCW_XCPT_MASK;
9497#ifdef LOG_ENABLED
9498 uint16_t fOldFsw = pFpuCtx->FSW;
9499#endif
9500 iemFpuRecalcExceptionStatus(pFpuCtx);
9501#ifdef LOG_ENABLED
9502 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9503 Log11(("fnstenv: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9504 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9505#endif
9506
9507 iemHlpUsedFpu(pVCpu);
9508
9509 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9510 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9511}
9512
9513
9514/**
9515 * Implements 'FNSAVE'.
9516 *
9517 * @param enmEffOpSize The operand size.
9518 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9519 * @param GCPtrEffDst The address of the image.
9520 */
9521IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9522{
9523 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9524
9525 RTPTRUNION uPtr;
9526 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9527 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9528 if (rcStrict != VINF_SUCCESS)
9529 return rcStrict;
9530
9531 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9532 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9533 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9534 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9535 {
9536 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9537 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9538 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9539 }
9540
9541 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9542 if (rcStrict != VINF_SUCCESS)
9543 return rcStrict;
9544
9545 /* Rotate the stack to account for changed TOS. */
9546 iemFpuRotateStackSetTop(pFpuCtx, 0);
9547
9548 /*
9549 * Re-initialize the FPU context.
9550 */
9551 pFpuCtx->FCW = 0x37f;
9552 pFpuCtx->FSW = 0;
9553 pFpuCtx->FTW = 0x00; /* 0 - empty */
9554 pFpuCtx->FPUDP = 0;
9555 pFpuCtx->DS = 0;
9556 pFpuCtx->Rsrvd2= 0;
9557 pFpuCtx->FPUIP = 0;
9558 pFpuCtx->CS = 0;
9559 pFpuCtx->Rsrvd1= 0;
9560 pFpuCtx->FOP = 0;
9561
9562 iemHlpUsedFpu(pVCpu);
9563 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9564}
9565
9566
9567
9568/**
9569 * Implements 'FLDENV'.
9570 *
9571 * @param enmEffOpSize The operand size (only REX.W really matters).
9572 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9573 * @param GCPtrEffSrc The address of the image.
9574 */
9575IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9576{
9577 RTCPTRUNION uPtr;
9578 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9579 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9580 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9581 if (rcStrict != VINF_SUCCESS)
9582 return rcStrict;
9583
9584 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9585
9586 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9587 if (rcStrict != VINF_SUCCESS)
9588 return rcStrict;
9589
9590 iemHlpUsedFpu(pVCpu);
9591 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9592}
9593
9594
9595/**
9596 * Implements 'FRSTOR'.
9597 *
9598 * @param enmEffOpSize The operand size.
9599 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9600 * @param GCPtrEffSrc The address of the image.
9601 */
9602IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9603{
9604 RTCPTRUNION uPtr;
9605 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9606 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9607 if (rcStrict != VINF_SUCCESS)
9608 return rcStrict;
9609
9610 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9611 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9612 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9613 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9614 {
9615 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9616 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9617 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9618 pFpuCtx->aRegs[i].au32[3] = 0;
9619 }
9620
9621 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9622 if (rcStrict != VINF_SUCCESS)
9623 return rcStrict;
9624
9625 iemHlpUsedFpu(pVCpu);
9626 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9627}
9628
9629
9630/**
9631 * Implements 'FLDCW'.
9632 *
9633 * @param u16Fcw The new FCW.
9634 */
9635IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9636{
9637 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9638
9639 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9640 /** @todo Testcase: Try to see what happens when trying to set undefined bits
9641 * (other than 6 and 7). Currently ignoring them. */
9642 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
9643 * according to FSW. (This is what is currently implemented.) */
9644 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9645 pFpuCtx->FCW = u16Fcw & (~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK); /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9646#ifdef LOG_ENABLED
9647 uint16_t fOldFsw = pFpuCtx->FSW;
9648#endif
9649 iemFpuRecalcExceptionStatus(pFpuCtx);
9650#ifdef LOG_ENABLED
9651 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9652 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9653 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9654#endif
9655
9656 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9657 iemHlpUsedFpu(pVCpu);
9658 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9659}
9660
9661
9662
9663/**
9664 * Implements the underflow case of fxch.
9665 *
9666 * @param iStReg The other stack register.
9667 * @param uFpuOpcode The FPU opcode (for simplicity).
9668 */
9669IEM_CIMPL_DEF_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode)
9670{
9671 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9672
9673 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9674 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9675 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9676 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9677
9678 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
9679 * registers are read as QNaN and then exchanged. This could be
9680 * wrong... */
9681 if (pFpuCtx->FCW & X86_FCW_IM)
9682 {
9683 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9684 {
9685 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9686 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9687 else
9688 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9689 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9690 }
9691 else
9692 {
9693 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9694 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9695 }
9696 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9697 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9698 }
9699 else
9700 {
9701 /* raise underflow exception, don't change anything. */
9702 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9703 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9704 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9705 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9706 }
9707
9708 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
9709 iemHlpUsedFpu(pVCpu);
9710 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9711}
9712
9713
9714/**
9715 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9716 *
9717 * @param iStReg The other stack register.
9718 * @param pfnAImpl The assembly comparison implementation.
9719 * @param uPopAndFpuOpcode Bits 15-0: The FPU opcode.
9720 * Bit 31: Whether we should pop the stack when
9721 * done or not.
9722 */
9723IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode)
9724{
9725 Assert(iStReg < 8);
9726 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9727
9728 /*
9729 * Raise exceptions.
9730 */
9731 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9732 return iemRaiseDeviceNotAvailable(pVCpu);
9733
9734 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9735 uint16_t u16Fsw = pFpuCtx->FSW;
9736 if (u16Fsw & X86_FSW_ES)
9737 return iemRaiseMathFault(pVCpu);
9738
9739 /*
9740 * Check if any of the register accesses causes #SF + #IA.
9741 */
9742 bool fPop = RT_BOOL(uPopAndFpuOpcode & RT_BIT_32(31));
9743 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9744 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9745 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9746 {
9747 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9748
9749 pFpuCtx->FSW &= ~X86_FSW_C1;
9750 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9751 if ( !(u16Fsw & X86_FSW_IE)
9752 || (pFpuCtx->FCW & X86_FCW_IM) )
9753 {
9754 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9755 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9756 }
9757 }
9758 else if (pFpuCtx->FCW & X86_FCW_IM)
9759 {
9760 /* Masked underflow. */
9761 pFpuCtx->FSW &= ~X86_FSW_C1;
9762 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9763 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9764 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9765 }
9766 else
9767 {
9768 /* Raise underflow - don't touch EFLAGS or TOP. */
9769 pFpuCtx->FSW &= ~X86_FSW_C1;
9770 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9771 Log11(("fcomi: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9772 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9773 fPop = false;
9774 }
9775
9776 /*
9777 * Pop if necessary.
9778 */
9779 if (fPop)
9780 {
9781 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9782 iemFpuStackIncTop(pVCpu);
9783 }
9784
9785 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, (uint16_t)uPopAndFpuOpcode);
9786 iemHlpUsedFpu(pVCpu);
9787 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9788}
9789
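/*
 * For reference, the comparison result returned by the assembly worker follows
 * the architectural FCOMI/FUCOMI mapping copied into EFLAGS above: ZF,PF,CF =
 * 000 for ST(0) > ST(i), 001 for ST(0) < ST(i), 100 for equal operands and 111
 * for an unordered compare (which may also raise IE, hence the masked/unmasked
 * handling).
 */
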
9790/** @} */
9791