VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp@99654

Last change on this file since 99654 was 99651, checked in by vboxsync, 20 months ago

VMM/IEM: Nested VMX: bugref:10318 Clear "NMI unblocked due to IRET" state on completion of IRET emulation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 353.4 KB
1/* $Id: IEMAllCImpl.cpp 99651 2023-05-08 07:04:05Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/apic.h>
37#include <VBox/vmm/pdm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/iom.h>
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/vmm/nem.h>
43#include <VBox/vmm/gim.h>
44#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
45# include <VBox/vmm/em.h>
46# include <VBox/vmm/hm_svm.h>
47#endif
48#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
49# include <VBox/vmm/hmvmxinline.h>
50#endif
51#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
52# include <VBox/vmm/cpuidcall.h>
53#endif
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/dbgf.h>
56#include <VBox/vmm/dbgftrace.h>
57#include "IEMInternal.h"
58#include <VBox/vmm/vmcc.h>
59#include <VBox/log.h>
60#include <VBox/err.h>
61#include <VBox/param.h>
62#include <VBox/dis.h>
63#include <iprt/asm-math.h>
64#include <iprt/assert.h>
65#include <iprt/string.h>
66#include <iprt/x86.h>
67
68#include "IEMInline.h"
69
70
71/*********************************************************************************************************************************
72* Defined Constants And Macros *
73*********************************************************************************************************************************/
74/**
75 * Flushes the prefetch buffer, light version.
76 */
77#ifndef IEM_WITH_CODE_TLB
78# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { (a_pVCpu)->iem.s.cbOpcode = (a_cbInstr); } while (0)
79#else
80# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { } while (0)
81#endif
82
83/**
84 * Flushes the prefetch buffer, heavy version.
85 */
86#ifndef IEM_WITH_CODE_TLB
87# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { (a_pVCpu)->iem.s.cbOpcode = (a_cbInstr); } while (0)
88#else
89# if 1
90# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { (a_pVCpu)->iem.s.pbInstrBuf = NULL; } while (0)
91# else
92# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { } while (0)
93# endif
94#endif
95
96
97
98/** @name Misc Helpers
99 * @{
100 */
101
102
103/**
104 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
105 *
106 * @returns Strict VBox status code.
107 *
108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
109 * @param u16Port The port number.
110 * @param cbOperand The operand size.
111 */
112static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
113{
114 /* The TSS bits we're interested in are the same on 386 and AMD64. */
115 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
116 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
117 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
118 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
119
120 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
121
122 /*
123 * Check the TSS type; 16-bit TSSes don't have any I/O permission bitmap.
124 */
125 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
126 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
127 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
128 {
129 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
130 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
131 return iemRaiseGeneralProtectionFault0(pVCpu);
132 }
133
134 /*
135 * Read the bitmap offset (may #PF).
136 */
137 uint16_t offBitmap;
138 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
139 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
140 if (rcStrict != VINF_SUCCESS)
141 {
142 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
143 return rcStrict;
144 }
145
146 /*
147 * We need to check the bit range from u16Port to (u16Port + cbOperand - 1);
148 * however, Intel describes the CPU as actually reading two bytes regardless of
149 * whether the bit range crosses a byte boundary. Thus the + 1 in the test below.
150 */
151 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
152 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
153 * for instance, sizeof(X86TSS32). */
154 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
155 {
156 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
157 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
158 return iemRaiseGeneralProtectionFault0(pVCpu);
159 }
160
161 /*
162 * Read the necessary bits.
163 */
164 /** @todo Test the assertion in the Intel manual that the CPU reads two
165 * bytes. The question is how this works wrt \#PF and \#GP on the
166 * 2nd byte when it's not required. */
167 uint16_t bmBytes = UINT16_MAX;
168 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
169 if (rcStrict != VINF_SUCCESS)
170 {
171 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
172 return rcStrict;
173 }
174
175 /*
176 * Perform the check.
177 */
178 uint16_t fPortMask = (1 << cbOperand) - 1;
179 bmBytes >>= (u16Port & 7);
180 if (bmBytes & fPortMask)
181 {
182 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
183 u16Port, cbOperand, bmBytes, fPortMask));
184 return iemRaiseGeneralProtectionFault0(pVCpu);
185 }
186
187 return VINF_SUCCESS;
188}
189
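/*
 * For reference, a standalone sketch of the bitmap test above (helper name and
 * parameters are invented for illustration; never compiled).  E.g. for
 * u16Port=0x3f9 and cbOperand=2 we fetch the two bitmap bytes at
 * offIoBitmap + 0x7f, shift right by one (0x3f9 & 7) and test the low two
 * bits; any set bit denies the access with #GP(0).
 */
#if 0 /* illustration only */
static bool iemExampleIoBitmapDenies(uint8_t const *pabTss, uint16_t offIoBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    uint32_t const offByte   = offIoBitmap + u16Port / 8;                              /* first bitmap byte */
    uint16_t const bmBytes   = pabTss[offByte] | ((uint16_t)pabTss[offByte + 1] << 8); /* two bytes, as the CPU reads */
    uint16_t const fPortMask = (uint16_t)((1 << cbOperand) - 1);                       /* one bit per accessed port */
    return ((bmBytes >> (u16Port & 7)) & fPortMask) != 0;                              /* any set bit => denied */
}
#endif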
190
191/**
192 * Checks if we are allowed to access the given I/O port, raising the
193 * appropriate exceptions if we aren't (or if the I/O bitmap is not
194 * accessible).
195 *
196 * @returns Strict VBox status code.
197 *
198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
199 * @param u16Port The port number.
200 * @param cbOperand The operand size.
201 */
202DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
203{
204 X86EFLAGS Efl;
205 Efl.u = IEMMISC_GET_EFL(pVCpu);
206 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
207 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
208 || Efl.Bits.u1VM) )
209 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
210 return VINF_SUCCESS;
211}
212
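/*
 * Summary of when the bitmap check above applies, restated as a predicate
 * (the helper name and parameters are invented; never compiled): in real mode
 * I/O is always allowed, in protected mode it is allowed directly when
 * CPL <= IOPL and EFLAGS.VM is clear, otherwise the TSS I/O bitmap decides.
 */
#if 0 /* illustration only */
static bool iemExampleNeedsIoBitmapCheck(uint64_t uCr0, uint8_t uCpl, uint32_t fEfl)
{
    return (uCr0 & X86_CR0_PE)
        && (   uCpl > X86_EFL_GET_IOPL(fEfl)
            || (fEfl & X86_EFL_VM));
}
#endif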
213
214#if 0
215/**
216 * Calculates the parity bit.
217 *
218 * @returns true if the bit is set, false if not.
219 * @param u8Result The least significant byte of the result.
220 */
221static bool iemHlpCalcParityFlag(uint8_t u8Result)
222{
223 /*
224 * Parity is set if the number of bits in the least significant byte of
225 * the result is even.
226 */
227 uint8_t cBits;
228 cBits = u8Result & 1; /* 0 */
229 u8Result >>= 1;
230 cBits += u8Result & 1;
231 u8Result >>= 1;
232 cBits += u8Result & 1;
233 u8Result >>= 1;
234 cBits += u8Result & 1;
235 u8Result >>= 1;
236 cBits += u8Result & 1; /* 4 */
237 u8Result >>= 1;
238 cBits += u8Result & 1;
239 u8Result >>= 1;
240 cBits += u8Result & 1;
241 u8Result >>= 1;
242 cBits += u8Result & 1;
243 return !(cBits & 1);
244}
245#endif /* not used */
246
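/*
 * The unused helper above counts the bits of the low byte one at a time; an
 * equivalent and more compact formulation, assuming a GCC/Clang style
 * __builtin_parity, would be (illustration only, never compiled):
 */
#if 0
static bool iemExampleCalcParityFlag(uint8_t u8Result)
{
    return !__builtin_parity(u8Result); /* PF is set when the bit count is even */
}
#endif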
247
248/**
249 * Updates the specified flags according to an 8-bit result.
250 *
251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
252 * @param u8Result The result to set the flags according to.
253 * @param fToUpdate The flags to update.
254 * @param fUndefined The flags that are specified as undefined.
255 */
256static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
257{
258 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
259 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
260 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
261 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
262}
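/*
 * The merge above takes only the bits in fToUpdate | fUndefined from the
 * freshly computed flags and keeps everything else.  E.g. with
 * fToUpdate=X86_EFL_ZF and fUndefined=X86_EFL_AF, ZF and AF come from the
 * TEST result while CF, OF, SF and PF are left untouched.  Restated
 * (illustration only, never compiled):
 */
#if 0
static uint32_t iemExampleMergeEFlags(uint32_t fOld, uint32_t fNew, uint32_t fToUpdate, uint32_t fUndefined)
{
    uint32_t const fMask = fToUpdate | fUndefined;
    return (fOld & ~fMask) | (fNew & fMask);
}
#endif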
263
264
265/**
266 * Updates the specified flags according to a 16-bit result.
267 *
268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
269 * @param u16Result The result to set the flags according to.
270 * @param fToUpdate The flags to update.
271 * @param fUndefined The flags that are specified as undefined.
272 */
273static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
274{
275 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
276 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
277 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
278 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
279}
280
281
282/**
283 * Helper used by iret.
284 *
285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
286 * @param uCpl The new CPL.
287 * @param pSReg Pointer to the segment register.
288 */
289static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
290{
291 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
292 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
293
294 if ( uCpl > pSReg->Attr.n.u2Dpl
295 && pSReg->Attr.n.u1DescType /* code or data, not system */
296 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
297 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
298 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
299}
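/*
 * The condition above implements the IRET rule that a data or non-conforming
 * code selector is loaded with NULL when returning to a less privileged level
 * (uCpl > DPL); conforming code and system segments are exempt.  Restated as
 * a predicate (name invented, illustration only, never compiled):
 */
#if 0
static bool iemExampleMustNullSelectorOnIret(uint8_t uNewCpl, X86DESCATTR Attr)
{
    return uNewCpl > Attr.n.u2Dpl
        && Attr.n.u1DescType /* code or data, not system */
        && (Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF);
}
#endif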
300
301
302/**
303 * Indicates that we have modified the FPU state.
304 *
305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
306 */
307DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
308{
309 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
310}
311
312/** @} */
313
314/** @name C Implementations
315 * @{
316 */
317
318/**
319 * Implements a 16-bit popa.
320 */
321IEM_CIMPL_DEF_0(iemCImpl_popa_16)
322{
323 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
324 RTGCPTR GCPtrLast = GCPtrStart + 15;
325 VBOXSTRICTRC rcStrict;
326
327 /*
328 * The docs are a bit hard to comprehend here, but it looks like we wrap
329 * around in real mode as long as none of the individual pops crosses the
330 * end of the stack segment. In protected mode we check the whole access
331 * in one go. For efficiency, only do the word-by-word thing if we're in
332 * danger of wrapping around.
333 */
334 /** @todo do popa boundary / wrap-around checks. */
335 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
336 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
337 {
338 /* word-by-word */
339 RTUINT64U TmpRsp;
340 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
341 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
344 if (rcStrict == VINF_SUCCESS)
345 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
346 if (rcStrict == VINF_SUCCESS)
347 {
348 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
349 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
350 }
351 if (rcStrict == VINF_SUCCESS)
352 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
353 if (rcStrict == VINF_SUCCESS)
354 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
355 if (rcStrict == VINF_SUCCESS)
356 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
357 if (rcStrict == VINF_SUCCESS)
358 {
359 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
360 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
361 }
362 }
363 else
364 {
365 uint16_t const *pa16Mem = NULL;
366 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
367 if (rcStrict == VINF_SUCCESS)
368 {
369 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
370 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
371 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
372 /* skip sp */
373 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
374 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
375 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
376 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
377 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
378 if (rcStrict == VINF_SUCCESS)
379 {
380 iemRegAddToRsp(pVCpu, 16);
381 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
382 }
383 }
384 }
385 return rcStrict;
386}
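/*
 * Note on the "7 - X86_GREG_xXX" indexing above (and in the other PUSHA/POPA
 * helpers): PUSHA stores AX, CX, DX, BX, SP, BP, SI, DI from high addresses
 * towards low ones, so in the mapped 8-word frame DI sits at index 0 and AX
 * at index 7.  Since X86_GREG_xAX..X86_GREG_xDI are 0..7, "7 - reg" converts
 * a register number into its frame slot (illustration only, never compiled):
 */
#if 0
static unsigned iemExamplePushaFrameSlot(unsigned iGReg) /* X86_GREG_xAX..X86_GREG_xDI */
{
    return 7 - iGReg; /* AX -> slot 7 (highest address), DI -> slot 0 (lowest) */
}
#endif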
387
388
389/**
390 * Implements a 32-bit popa.
391 */
392IEM_CIMPL_DEF_0(iemCImpl_popa_32)
393{
394 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
395 RTGCPTR GCPtrLast = GCPtrStart + 31;
396 VBOXSTRICTRC rcStrict;
397
398 /*
399 * The docs are a bit hard to comprehend here, but it looks like we wrap
400 * around in real mode as long as none of the individual pops crosses the
401 * end of the stack segment. In protected mode we check the whole access
402 * in one go. For efficiency, only do the word-by-word thing if we're in
403 * danger of wrapping around.
404 */
405 /** @todo do popa boundary / wrap-around checks. */
406 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
407 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
408 {
409 /* word-by-word */
410 RTUINT64U TmpRsp;
411 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
412 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
413 if (rcStrict == VINF_SUCCESS)
414 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
415 if (rcStrict == VINF_SUCCESS)
416 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
417 if (rcStrict == VINF_SUCCESS)
418 {
419 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
420 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
421 }
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
428 if (rcStrict == VINF_SUCCESS)
429 {
430#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
431 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
432 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
433 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
434 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
435 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
436 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
437 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
438#endif
439 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
440 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
441 }
442 }
443 else
444 {
445 uint32_t const *pa32Mem;
446 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
447 if (rcStrict == VINF_SUCCESS)
448 {
449 pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
450 pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
451 pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
452 /* skip esp */
453 pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
454 pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
455 pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
456 pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
457 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
458 if (rcStrict == VINF_SUCCESS)
459 {
460 iemRegAddToRsp(pVCpu, 32);
461 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
462 }
463 }
464 }
465 return rcStrict;
466}
467
468
469/**
470 * Implements a 16-bit pusha.
471 */
472IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
473{
474 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
475 RTGCPTR GCPtrBottom = GCPtrTop - 15;
476 VBOXSTRICTRC rcStrict;
477
478 /*
479 * The docs are a bit hard to comprehend here, but it looks like we wrap
480 * around in real mode as long as none of the individual pushes crosses the
481 * end of the stack segment. In protected mode we check the whole access
482 * in one go. For efficiency, only do the word-by-word thing if we're in
483 * danger of wrapping around.
484 */
485 /** @todo do pusha boundary / wrap-around checks. */
486 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
487 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
488 {
489 /* word-by-word */
490 RTUINT64U TmpRsp;
491 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
492 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
493 if (rcStrict == VINF_SUCCESS)
494 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
495 if (rcStrict == VINF_SUCCESS)
496 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
497 if (rcStrict == VINF_SUCCESS)
498 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
499 if (rcStrict == VINF_SUCCESS)
500 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
501 if (rcStrict == VINF_SUCCESS)
502 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
503 if (rcStrict == VINF_SUCCESS)
504 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
505 if (rcStrict == VINF_SUCCESS)
506 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
507 if (rcStrict == VINF_SUCCESS)
508 {
509 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
510 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
511 }
512 }
513 else
514 {
515 GCPtrBottom--;
516 uint16_t *pa16Mem = NULL;
517 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
518 if (rcStrict == VINF_SUCCESS)
519 {
520 pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
521 pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
522 pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
523 pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
524 pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
525 pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
526 pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
527 pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
528 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
529 if (rcStrict == VINF_SUCCESS)
530 {
531 iemRegSubFromRsp(pVCpu, 16);
532 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
533 }
534 }
535 }
536 return rcStrict;
537}
538
539
540/**
541 * Implements a 32-bit pusha.
542 */
543IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
544{
545 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
546 RTGCPTR GCPtrBottom = GCPtrTop - 31;
547 VBOXSTRICTRC rcStrict;
548
549 /*
550 * The docs are a bit hard to comprehend here, but it looks like we wrap
551 * around in real mode as long as none of the individual pushes crosses the
552 * end of the stack segment. In protected mode we check the whole access
553 * in one go. For efficiency, only do the word-by-word thing if we're in
554 * danger of wrapping around.
555 */
556 /** @todo do pusha boundary / wrap-around checks. */
557 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
558 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
559 {
560 /* word-by-word */
561 RTUINT64U TmpRsp;
562 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
563 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
564 if (rcStrict == VINF_SUCCESS)
565 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
566 if (rcStrict == VINF_SUCCESS)
567 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
568 if (rcStrict == VINF_SUCCESS)
569 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
570 if (rcStrict == VINF_SUCCESS)
571 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
572 if (rcStrict == VINF_SUCCESS)
573 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
574 if (rcStrict == VINF_SUCCESS)
575 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
576 if (rcStrict == VINF_SUCCESS)
577 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
578 if (rcStrict == VINF_SUCCESS)
579 {
580 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
581 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
582 }
583 }
584 else
585 {
586 GCPtrBottom--;
587 uint32_t *pa32Mem;
588 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
589 if (rcStrict == VINF_SUCCESS)
590 {
591 pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
592 pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
593 pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
594 pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
595 pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
596 pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
597 pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
598 pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
599 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
600 if (rcStrict == VINF_SUCCESS)
601 {
602 iemRegSubFromRsp(pVCpu, 32);
603 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
604 }
605 }
606 }
607 return rcStrict;
608}
609
610
611/**
612 * Implements pushf.
613 *
614 *
615 * @param enmEffOpSize The effective operand size.
616 */
617IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
618{
619 VBOXSTRICTRC rcStrict;
620
621 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
622 {
623 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
624 IEM_SVM_UPDATE_NRIP(pVCpu);
625 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
626 }
627
628 /*
629 * If we're in V8086 mode some care is required (which is why we're
630 * doing this in a C implementation).
631 */
632 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
633 if ( (fEfl & X86_EFL_VM)
634 && X86_EFL_GET_IOPL(fEfl) != 3 )
635 {
636 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
637 if ( enmEffOpSize != IEMMODE_16BIT
638 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
639 return iemRaiseGeneralProtectionFault0(pVCpu);
640 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
641 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
642 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
643 }
644 else
645 {
646
647 /*
648 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
649 */
650 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
651
652 switch (enmEffOpSize)
653 {
654 case IEMMODE_16BIT:
655 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
656 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
657 fEfl |= UINT16_C(0xf000);
658 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
659 break;
660 case IEMMODE_32BIT:
661 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
662 break;
663 case IEMMODE_64BIT:
664 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
665 break;
666 IEM_NOT_REACHED_DEFAULT_CASE_RET();
667 }
668 }
669
670 if (rcStrict == VINF_SUCCESS)
671 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
672 return rcStrict;
673}
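/*
 * Sketch of the CR4.VME twist above: with IOPL < 3 in V8086 mode a 16-bit
 * PUSHF does not fault but pushes a flags image in which the IF bit (bit 9)
 * is replaced by the virtual interrupt flag VIF (bit 19).  Restated
 * (illustration only, never compiled):
 */
#if 0
static uint16_t iemExampleVmePushfImage(uint32_t fEfl)
{
    fEfl &= ~X86_EFL_IF;                       /* drop the real IF */
    fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);  /* substitute VIF for IF */
    return (uint16_t)fEfl;
}
#endif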
674
675
676/**
677 * Implements popf.
678 *
679 * @param enmEffOpSize The effective operand size.
680 */
681IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
682{
683 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
684 VBOXSTRICTRC rcStrict;
685 uint32_t fEflNew;
686
687 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
688 {
689 Log2(("popf: Guest intercept -> #VMEXIT\n"));
690 IEM_SVM_UPDATE_NRIP(pVCpu);
691 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
692 }
693
694 /*
695 * V8086 is special as usual.
696 */
697 if (fEflOld & X86_EFL_VM)
698 {
699 /*
700 * Almost anything goes if IOPL is 3.
701 */
702 if (X86_EFL_GET_IOPL(fEflOld) == 3)
703 {
704 switch (enmEffOpSize)
705 {
706 case IEMMODE_16BIT:
707 {
708 uint16_t u16Value;
709 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
710 if (rcStrict != VINF_SUCCESS)
711 return rcStrict;
712 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
713 break;
714 }
715 case IEMMODE_32BIT:
716 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
717 if (rcStrict != VINF_SUCCESS)
718 return rcStrict;
719 break;
720 IEM_NOT_REACHED_DEFAULT_CASE_RET();
721 }
722
723 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
724 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
725 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
726 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
727 }
728 /*
729 * Interrupt flag virtualization with CR4.VME=1.
730 */
731 else if ( enmEffOpSize == IEMMODE_16BIT
732 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
733 {
734 uint16_t u16Value;
735 RTUINT64U TmpRsp;
736 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
737 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
738 if (rcStrict != VINF_SUCCESS)
739 return rcStrict;
740
741 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP
742 * or before? */
743 if ( ( (u16Value & X86_EFL_IF)
744 && (fEflOld & X86_EFL_VIP))
745 || (u16Value & X86_EFL_TF) )
746 return iemRaiseGeneralProtectionFault0(pVCpu);
747
748 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
749 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
750 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
751 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
752
753 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
754 }
755 else
756 return iemRaiseGeneralProtectionFault0(pVCpu);
757
758 }
759 /*
760 * Not in V8086 mode.
761 */
762 else
763 {
764 /* Pop the flags. */
765 switch (enmEffOpSize)
766 {
767 case IEMMODE_16BIT:
768 {
769 uint16_t u16Value;
770 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
771 if (rcStrict != VINF_SUCCESS)
772 return rcStrict;
773 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
774
775 /*
776 * Ancient CPU adjustments:
777 * - 8086, 80186, V20/30:
778 * The fixed bits 15:12 are not kept correctly internally, mostly for
779 * practical reasons (masking below). We add them when pushing flags.
780 * - 80286:
781 * The NT and IOPL flags cannot be popped from real mode and are
782 * therefore always zero (since a 286 can never exit from PM and
783 * their initial value is zero). This changed on a 386 and can
784 * therefore be used to detect 286 or 386 CPU in real mode.
785 */
786 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
787 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
788 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
789 break;
790 }
791 case IEMMODE_32BIT:
792 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
793 if (rcStrict != VINF_SUCCESS)
794 return rcStrict;
795 break;
796 case IEMMODE_64BIT:
797 {
798 uint64_t u64Value;
799 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
800 if (rcStrict != VINF_SUCCESS)
801 return rcStrict;
802 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
803 break;
804 }
805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
806 }
807
808 /* Merge them with the current flags. */
809 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
810 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
811 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
812 || pVCpu->iem.s.uCpl == 0)
813 {
814 fEflNew &= fPopfBits;
815 fEflNew |= ~fPopfBits & fEflOld;
816 }
817 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
818 {
819 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
820 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
821 }
822 else
823 {
824 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
825 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
826 }
827 }
828
829 /*
830 * Commit the flags.
831 */
832 Assert(fEflNew & RT_BIT_32(1));
833 IEMMISC_SET_EFL(pVCpu, fEflNew);
834 return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
835}
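/*
 * Summary of the protected mode POPF merge above: which popped bits reach
 * EFLAGS depends on privilege.
 *   - CPL == 0:        IOPL and IF are both taken from the popped value.
 *   - 0 < CPL <= IOPL: IF is taken from the popped value, IOPL is kept.
 *   - CPL > IOPL:      both IOPL and IF keep their old values.
 * In all cases the result is filtered through X86_EFL_POPF_BITS (or the 386
 * variant).  Restated (illustration only, never compiled):
 */
#if 0
static uint32_t iemExamplePopfMerge(uint32_t fOld, uint32_t fPopped, uint8_t uCpl, uint32_t fPopfBits)
{
    uint32_t fKeep = ~fPopfBits;                  /* bits POPF never changes */
    if (uCpl != 0)
    {
        fKeep |= X86_EFL_IOPL;                    /* only ring 0 may change IOPL */
        if (uCpl > X86_EFL_GET_IOPL(fOld))
            fKeep |= X86_EFL_IF;                  /* and only CPL <= IOPL may change IF */
    }
    return (fOld & fKeep) | (fPopped & ~fKeep);
}
#endif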
836
837
838/**
839 * Implements an indirect call.
840 *
841 * @param uNewPC The new program counter (RIP) value (loaded from the
842 * operand).
843 */
844IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
845{
846 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
847 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
848 {
849 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
850 if (rcStrict == VINF_SUCCESS)
851 {
852 pVCpu->cpum.GstCtx.rip = uNewPC;
853 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
854 return iemRegFinishClearingRF(pVCpu);
855 }
856 return rcStrict;
857 }
858 return iemRaiseGeneralProtectionFault0(pVCpu);
859}
860
861
862/**
863 * Implements a 16-bit relative call.
864 *
865 * @param offDisp The displacement offset.
866 */
867IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
868{
869 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
870 uint16_t const uNewPC = uOldPC + offDisp;
871 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
872 {
873 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
874 if (rcStrict == VINF_SUCCESS)
875 {
876 pVCpu->cpum.GstCtx.rip = uNewPC;
877 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
878 return iemRegFinishClearingRF(pVCpu);
879 }
880 return rcStrict;
881 }
882 return iemRaiseGeneralProtectionFault0(pVCpu);
883}
884
885
886/**
887 * Implements a 32-bit indirect call.
888 *
889 * @param uNewPC The new program counter (RIP) value (loaded from the
890 * operand).
891 */
892IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
893{
894 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
895 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
896 {
897 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
898 if (rcStrict == VINF_SUCCESS)
899 {
900 pVCpu->cpum.GstCtx.rip = uNewPC;
901 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
902 return iemRegFinishClearingRF(pVCpu);
903 }
904 return rcStrict;
905 }
906 return iemRaiseGeneralProtectionFault0(pVCpu);
907}
908
909
910/**
911 * Implements a 32-bit relative call.
912 *
913 * @param offDisp The displacement offset.
914 */
915IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
916{
917 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
918 uint32_t const uNewPC = uOldPC + offDisp;
919 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
920 {
921 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
922 if (rcStrict == VINF_SUCCESS)
923 {
924 pVCpu->cpum.GstCtx.rip = uNewPC;
925 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
926 return iemRegFinishClearingRF(pVCpu);
927 }
928 return rcStrict;
929 }
930 return iemRaiseGeneralProtectionFault0(pVCpu);
931}
932
933
934/**
935 * Implements a 64-bit indirect call.
936 *
937 * @param uNewPC The new program counter (RIP) value (loaded from the
938 * operand).
939 */
940IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
941{
942 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
943 if (IEM_IS_CANONICAL(uNewPC))
944 {
945 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
946 if (rcStrict == VINF_SUCCESS)
947 {
948 pVCpu->cpum.GstCtx.rip = uNewPC;
949 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
950 return iemRegFinishClearingRF(pVCpu);
951 }
952 return rcStrict;
953 }
954 return iemRaiseGeneralProtectionFault0(pVCpu);
955}
956
957
958/**
959 * Implements a 64-bit relative call.
960 *
961 * @param offDisp The displacement offset.
962 */
963IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
964{
965 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
966 uint64_t const uNewPC = uOldPC + offDisp;
967 if (IEM_IS_CANONICAL(uNewPC))
968 {
969 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
970 if (rcStrict == VINF_SUCCESS)
971 {
972 pVCpu->cpum.GstCtx.rip = uNewPC;
973 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
974 return iemRegFinishClearingRF(pVCpu);
975 }
976 return rcStrict;
977 }
978 return iemRaiseNotCanonical(pVCpu);
979}
980
981
982/**
983 * Implements far jumps and calls thru task segments (TSS).
984 *
985 * @returns VBox strict status code.
986 * @param pVCpu The cross context virtual CPU structure of the
987 * calling thread.
988 * @param cbInstr The current instruction length.
989 * @param uSel The selector.
990 * @param enmBranch The kind of branching we're performing.
991 * @param enmEffOpSize The effective operand size.
992 * @param pDesc The descriptor corresponding to @a uSel. The type is
993 * task segment.
994 */
995static VBOXSTRICTRC iemCImpl_BranchTaskSegment(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
996 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
997{
998#ifndef IEM_IMPLEMENTS_TASKSWITCH
999 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1000#else
1001 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1002 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1003 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
1004 RT_NOREF_PV(enmEffOpSize);
1005 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1006
1007 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1008 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1009 {
1010 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1011 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1012 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1013 }
1014
1015 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1016 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1017 * checked here, need testcases. */
1018 if (!pDesc->Legacy.Gen.u1Present)
1019 {
1020 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1021 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1022 }
1023
1024 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1025 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1026 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1027#endif
1028}
1029
1030
1031/**
1032 * Implements far jumps and calls thru task gates.
1033 *
1034 * @returns VBox strict status code.
1035 * @param pVCpu The cross context virtual CPU structure of the
1036 * calling thread.
1037 * @param cbInstr The current instruction length.
1038 * @param uSel The selector.
1039 * @param enmBranch The kind of branching we're performing.
1040 * @param enmEffOpSize The effective operand size.
1041 * @param pDesc The descriptor corresponding to @a uSel. The type is
1042 * task gate.
1043 */
1044static VBOXSTRICTRC iemCImpl_BranchTaskGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1045 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1046{
1047#ifndef IEM_IMPLEMENTS_TASKSWITCH
1048 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1049#else
1050 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1051 RT_NOREF_PV(enmEffOpSize);
1052 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1053
1054 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1055 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1056 {
1057 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1058 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1059 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1060 }
1061
1062 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1063 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1064 * checked here, need testcases. */
1065 if (!pDesc->Legacy.Gen.u1Present)
1066 {
1067 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1068 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1069 }
1070
1071 /*
1072 * Fetch the new TSS descriptor from the GDT.
1073 */
1074 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1075 if (uSelTss & X86_SEL_LDT)
1076 {
1077 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1078 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1079 }
1080
1081 IEMSELDESC TssDesc;
1082 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1087 {
1088 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1089 TssDesc.Legacy.Gate.u4Type));
1090 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1091 }
1092
1093 if (!TssDesc.Legacy.Gate.u1Present)
1094 {
1095 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1096 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1097 }
1098
1099 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1100 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1101 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1102#endif
1103}
1104
1105
1106/**
1107 * Implements far jumps and calls thru call gates.
1108 *
1109 * @returns VBox strict status code.
1110 * @param pVCpu The cross context virtual CPU structure of the
1111 * calling thread.
1112 * @param cbInstr The current instruction length.
1113 * @param uSel The selector.
1114 * @param enmBranch The kind of branching we're performing.
1115 * @param enmEffOpSize The effective operand size.
1116 * @param pDesc The descriptor corresponding to @a uSel. The type is
1117 * call gate.
1118 */
1119static VBOXSTRICTRC iemCImpl_BranchCallGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1120 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1121{
1122#define IEM_IMPLEMENTS_CALLGATE
1123#ifndef IEM_IMPLEMENTS_CALLGATE
1124 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1125#else
1126 RT_NOREF_PV(enmEffOpSize);
1127 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1128
1129 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1130 * inter-privilege calls and are much more complex.
1131 *
1132 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1133 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1134 * must be 16-bit or 32-bit.
1135 */
1136 /** @todo effective operand size is probably irrelevant here, only the
1137 * call gate bitness matters??
1138 */
1139 VBOXSTRICTRC rcStrict;
1140 RTPTRUNION uPtrRet;
1141 uint64_t uNewRsp;
1142 uint64_t uNewRip;
1143 uint64_t u64Base;
1144 uint32_t cbLimit;
1145 RTSEL uNewCS;
1146 IEMSELDESC DescCS;
1147
1148 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1149 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1150 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1151 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1152
1153 /* Determine the new instruction pointer from the gate descriptor. */
1154 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1155 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1156 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1157
1158 /* Perform DPL checks on the gate descriptor. */
1159 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1160 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1161 {
1162 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1163 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1164 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1165 }
1166
1167 /** @todo does this catch NULL selectors, too? */
1168 if (!pDesc->Legacy.Gen.u1Present)
1169 {
1170 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1171 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1172 }
1173
1174 /*
1175 * Fetch the target CS descriptor from the GDT or LDT.
1176 */
1177 uNewCS = pDesc->Legacy.Gate.u16Sel;
1178 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1179 if (rcStrict != VINF_SUCCESS)
1180 return rcStrict;
1181
1182 /* Target CS must be a code selector. */
1183 if ( !DescCS.Legacy.Gen.u1DescType
1184 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1185 {
1186 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1187 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1188 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1189 }
1190
1191 /* Privilege checks on target CS. */
1192 if (enmBranch == IEMBRANCH_JUMP)
1193 {
1194 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1195 {
1196 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1197 {
1198 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1199 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1200 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1201 }
1202 }
1203 else
1204 {
1205 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1206 {
1207 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1208 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1209 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1210 }
1211 }
1212 }
1213 else
1214 {
1215 Assert(enmBranch == IEMBRANCH_CALL);
1216 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1217 {
1218 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1219 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1220 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1221 }
1222 }
1223
1224 /* Additional long mode checks. */
1225 if (IEM_IS_LONG_MODE(pVCpu))
1226 {
1227 if (!DescCS.Legacy.Gen.u1Long)
1228 {
1229 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1230 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1231 }
1232
1233 /* L vs D. */
1234 if ( DescCS.Legacy.Gen.u1Long
1235 && DescCS.Legacy.Gen.u1DefBig)
1236 {
1237 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1238 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1239 }
1240 }
1241
1242 if (!DescCS.Legacy.Gate.u1Present)
1243 {
1244 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1245 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1246 }
1247
1248 if (enmBranch == IEMBRANCH_JUMP)
1249 {
1250 /** @todo This is very similar to regular far jumps; merge! */
1251 /* Jumps are fairly simple... */
1252
1253 /* Chop the high bits off if 16-bit gate (Intel says so). */
1254 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1255 uNewRip = (uint16_t)uNewRip;
1256
1257 /* Limit check for non-long segments. */
1258 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1259 if (DescCS.Legacy.Gen.u1Long)
1260 u64Base = 0;
1261 else
1262 {
1263 if (uNewRip > cbLimit)
1264 {
1265 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1266 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1267 }
1268 u64Base = X86DESC_BASE(&DescCS.Legacy);
1269 }
1270
1271 /* Canonical address check. */
1272 if (!IEM_IS_CANONICAL(uNewRip))
1273 {
1274 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1275 return iemRaiseNotCanonical(pVCpu);
1276 }
1277
1278 /*
1279 * Ok, everything checked out fine. Now set the accessed bit before
1280 * committing the result into CS, CSHID and RIP.
1281 */
1282 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1283 {
1284 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1285 if (rcStrict != VINF_SUCCESS)
1286 return rcStrict;
1287 /** @todo check what VT-x and AMD-V does. */
1288 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1289 }
1290
1291 /* commit */
1292 pVCpu->cpum.GstCtx.rip = uNewRip;
1293 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1294 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1295 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1296 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1297 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1298 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1299 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1300 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1301 }
1302 else
1303 {
1304 Assert(enmBranch == IEMBRANCH_CALL);
1305 /* Calls are much more complicated. */
1306
1307 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1308 {
1309 uint16_t offNewStack; /* Offset of new stack in TSS. */
1310 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1311 uint8_t uNewCSDpl;
1312 uint8_t cbWords;
1313 RTSEL uNewSS;
1314 RTSEL uOldSS;
1315 uint64_t uOldRsp;
1316 IEMSELDESC DescSS;
1317 RTPTRUNION uPtrTSS;
1318 RTGCPTR GCPtrTSS;
1319 RTPTRUNION uPtrParmWds;
1320 RTGCPTR GCPtrParmWds;
1321
1322 /* More privilege. This is the fun part. */
1323 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1324
1325 /*
1326 * Determine new SS:rSP from the TSS.
1327 */
1328 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1329
1330 /* Figure out where the new stack pointer is stored in the TSS. */
1331 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1332 if (!IEM_IS_LONG_MODE(pVCpu))
1333 {
1334 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1335 {
1336 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1337 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1338 }
1339 else
1340 {
1341 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1342 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1343 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1344 }
1345 }
1346 else
1347 {
1348 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1349 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1350 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1351 }
1352
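/*
 * Note: the offsets computed above follow the fixed TSS layouts, where the
 * ring N stack pointers live at:
 *   32-bit TSS: {ESP0,SS0} at offset 4 + N*8 (8 bytes per ring),
 *   16-bit TSS: {SP0,SS0}  at offset 2 + N*4 (4 bytes per ring),
 *   64-bit TSS: RSP0..RSP2 at offset 4 + N*8 (no SS; a NULL SS is loaded).
 * For example, with a 32-bit TSS and a gate targeting ring 1:
 *   RT_UOFFSETOF(X86TSS32, esp0) + 1 * 8 = 4 + 8 = 12, i.e. the esp1 field.
 */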
1353 /* Check against TSS limit. */
1354 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1355 {
1356 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1357 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1358 }
1359
1360 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1361 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
1362 if (rcStrict != VINF_SUCCESS)
1363 {
1364 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1365 return rcStrict;
1366 }
1367
1368 if (!IEM_IS_LONG_MODE(pVCpu))
1369 {
1370 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1371 {
1372 uNewRsp = uPtrTSS.pu32[0];
1373 uNewSS = uPtrTSS.pu16[2];
1374 }
1375 else
1376 {
1377 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1378 uNewRsp = uPtrTSS.pu16[0];
1379 uNewSS = uPtrTSS.pu16[1];
1380 }
1381 }
1382 else
1383 {
1384 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1385 /* SS will be a NULL selector, but that's valid. */
1386 uNewRsp = uPtrTSS.pu64[0];
1387 uNewSS = uNewCSDpl;
1388 }
1389
1390 /* Done with the TSS now. */
1391 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1392 if (rcStrict != VINF_SUCCESS)
1393 {
1394 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1395 return rcStrict;
1396 }
1397
1398 /* Only used outside of long mode. */
1399 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1400
1401 /* If EFER.LMA is 0, there's extra work to do. */
1402 if (!IEM_IS_LONG_MODE(pVCpu))
1403 {
1404 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1405 {
1406 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1407 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1408 }
1409
1410 /* Grab the new SS descriptor. */
1411 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1412 if (rcStrict != VINF_SUCCESS)
1413 return rcStrict;
1414
1415 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1416 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1417 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1418 {
1419 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1420 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1421 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1422 }
1423
1424 /* Ensure new SS is a writable data segment. */
1425 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1426 {
1427 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1428 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1429 }
1430
1431 if (!DescSS.Legacy.Gen.u1Present)
1432 {
1433 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1434 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1435 }
1436 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1437 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1438 else
1439 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1440 }
1441 else
1442 {
1443 /* Just grab the new (NULL) SS descriptor. */
1444 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1445 * like we do... */
1446 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1447 if (rcStrict != VINF_SUCCESS)
1448 return rcStrict;
1449
1450 cbNewStack = sizeof(uint64_t) * 4;
1451 }
1452
1453 /** @todo According to Intel, new stack is checked for enough space first,
1454 * then switched. According to AMD, the stack is switched first and
1455 * then pushes might fault!
1456 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1457 * incoming stack \#PF happens before actual stack switch. AMD is
1458 * either lying or implicitly assumes that new state is committed
1459 * only if and when an instruction doesn't fault.
1460 */
1461
1462 /** @todo According to AMD, CS is loaded first, then SS.
1463 * According to Intel, it's the other way around!?
1464 */
1465
1466 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1467
1468 /* Set the accessed bit before committing new SS. */
1469 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1470 {
1471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1472 if (rcStrict != VINF_SUCCESS)
1473 return rcStrict;
1474 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1475 }
1476
1477 /* Remember the old SS:rSP and their linear address. */
1478 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1479 uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1480
1481 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1482
1483 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1484 or #PF; the former is not implemented in this workaround. */
1485 /** @todo Properly fix call gate target stack exceptions. */
1486 /** @todo testcase: Cover callgates with partially or fully inaccessible
1487 * target stacks. */
1488 void *pvNewFrame;
1489 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1490 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1491 if (rcStrict != VINF_SUCCESS)
1492 {
1493 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1494 return rcStrict;
1495 }
1496 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1497 if (rcStrict != VINF_SUCCESS)
1498 {
1499 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1500 return rcStrict;
1501 }
1502
1503 /* Commit new SS:rSP. */
1504 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1505 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1506 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1507 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1508 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1509 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1510 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1511 pVCpu->iem.s.uCpl = uNewCSDpl; /** @todo are the parameter words accessed using the new CPL or the old CPL? */
1512 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1513 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1514
1515 /* At this point the stack access must not fail because new state was already committed. */
1516 /** @todo this can still fail because SS.LIMIT is not checked. */
1517 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1518 IEM_IS_LONG_MODE(pVCpu) ? 7
1519 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1520 &uPtrRet.pv, &uNewRsp);
1521 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1522 VERR_INTERNAL_ERROR_5);
1523
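/*
 * Note on the frame built below, from the lowest address of the new stack up:
 *   32-bit gate: EIP, CS, parameter dwords 0..cbWords-1, old ESP, old SS
 *   16-bit gate: IP,  CS, parameter words  0..cbWords-1, old SP,  old SS
 *   64-bit gate: RIP, CS, old RSP, old SS (no parameters are copied)
 * which is why the parameter copies target uPtrRet.pu32[2 + i] / pu16[2 + i]
 * and the old SS:rSP lands at index 2 + cbWords.
 */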
1524 if (!IEM_IS_LONG_MODE(pVCpu))
1525 {
1526 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1527 {
1528 if (cbWords)
1529 {
1530 /* Map the relevant chunk of the old stack. */
1531 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1532 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1533 if (rcStrict != VINF_SUCCESS)
1534 {
1535 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1536 return rcStrict;
1537 }
1538
1539 /* Copy the parameter (d)words. */
1540 for (int i = 0; i < cbWords; ++i)
1541 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1542
1543 /* Unmap the old stack. */
1544 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1545 if (rcStrict != VINF_SUCCESS)
1546 {
1547 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1548 return rcStrict;
1549 }
1550 }
1551
1552 /* Push the old CS:rIP. */
1553 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1554 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1555
1556 /* Push the old SS:rSP. */
1557 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1558 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1559 }
1560 else
1561 {
1562 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1563
1564 if (cbWords)
1565 {
1566 /* Map the relevant chunk of the old stack. */
1567 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1568 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1569 if (rcStrict != VINF_SUCCESS)
1570 {
1571 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1572 return rcStrict;
1573 }
1574
1575 /* Copy the parameter words. */
1576 for (int i = 0; i < cbWords; ++i)
1577 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1578
1579 /* Unmap the old stack. */
1580 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1581 if (rcStrict != VINF_SUCCESS)
1582 {
1583 Log(("BranchCallGate: Old stack unmapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1584 return rcStrict;
1585 }
1586 }
1587
1588 /* Push the old CS:rIP. */
1589 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1590 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1591
1592 /* Push the old SS:rSP. */
1593 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1594 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1595 }
1596 }
1597 else
1598 {
1599 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1600
1601 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1602 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1603 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1604 uPtrRet.pu64[2] = uOldRsp;
1605 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1606 }
1607
1608 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1609 if (rcStrict != VINF_SUCCESS)
1610 {
1611 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1612 return rcStrict;
1613 }
1614
1615 /* Chop the high bits off if 16-bit gate (Intel says so). */
1616 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1617 uNewRip = (uint16_t)uNewRip;
1618
1619 /* Limit / canonical check. */
1620 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1621 if (!IEM_IS_LONG_MODE(pVCpu))
1622 {
1623 if (uNewRip > cbLimit)
1624 {
1625 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1626 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1627 }
1628 u64Base = X86DESC_BASE(&DescCS.Legacy);
1629 }
1630 else
1631 {
1632 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1633 if (!IEM_IS_CANONICAL(uNewRip))
1634 {
1635 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1636 return iemRaiseNotCanonical(pVCpu);
1637 }
1638 u64Base = 0;
1639 }
1640
1641 /*
1642 * Now set the accessed bit before
1643 * committing the result into CS, CSHID and RIP. (The return address and
1644 * outer SS:RSP were already written to the new stack above.)
1645 */
1646 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1647 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1648 {
1649 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1650 if (rcStrict != VINF_SUCCESS)
1651 return rcStrict;
1652 /** @todo check what VT-x and AMD-V does. */
1653 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1654 }
1655
1656 /* Commit new CS:rIP. */
1657 pVCpu->cpum.GstCtx.rip = uNewRip;
1658 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1659 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1660 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1661 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1662 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1663 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1664 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1665 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1666 }
1667 else
1668 {
1669 /* Same privilege. */
1670 /** @todo This is very similar to regular far calls; merge! */
1671
1672 /* Check stack first - may #SS(0). */
1673 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1674 * 16-bit code cause a two or four byte CS to be pushed? */
1675 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1676 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1677 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1678 IEM_IS_LONG_MODE(pVCpu) ? 7
1679 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
1680 &uPtrRet.pv, &uNewRsp);
1681 if (rcStrict != VINF_SUCCESS)
1682 return rcStrict;
1683
1684 /* Chop the high bits off if 16-bit gate (Intel says so). */
1685 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1686 uNewRip = (uint16_t)uNewRip;
1687
1688 /* Limit / canonical check. */
1689 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1690 if (!IEM_IS_LONG_MODE(pVCpu))
1691 {
1692 if (uNewRip > cbLimit)
1693 {
1694 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1695 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1696 }
1697 u64Base = X86DESC_BASE(&DescCS.Legacy);
1698 }
1699 else
1700 {
1701 if (!IEM_IS_CANONICAL(uNewRip))
1702 {
1703 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1704 return iemRaiseNotCanonical(pVCpu);
1705 }
1706 u64Base = 0;
1707 }
1708
1709 /*
1710 * Now set the accessed bit before
1711 * writing the return address to the stack and committing the result into
1712 * CS, CSHID and RIP.
1713 */
1714 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1715 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1716 {
1717 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1718 if (rcStrict != VINF_SUCCESS)
1719 return rcStrict;
1720 /** @todo check what VT-x and AMD-V does. */
1721 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1722 }
1723
1724 /* stack */
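     /* A same-privilege gate transfer only pushes the return CS:IP; the stack does
        not change and no parameter words are copied. The slot size follows the gate type. */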
1725 if (!IEM_IS_LONG_MODE(pVCpu))
1726 {
1727 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1728 {
1729 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1730 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1731 }
1732 else
1733 {
1734 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1735 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1736 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1737 }
1738 }
1739 else
1740 {
1741 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1742 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1743 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1744 }
1745
1746 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1747 if (rcStrict != VINF_SUCCESS)
1748 return rcStrict;
1749
1750 /* commit */
1751 pVCpu->cpum.GstCtx.rip = uNewRip;
1752 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1753 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1754 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1755 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1756 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1757 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1758 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1759 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1760 }
1761 }
1762 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1763/** @todo single stepping */
1764
1765 /* Flush the prefetch buffer. */
1766 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1767 return VINF_SUCCESS;
1768#endif /* IEM_IMPLEMENTS_CALLGATE */
1769}
1770
1771
1772/**
1773 * Implements far jumps and calls thru system selectors.
1774 *
1775 * @returns VBox strict status code.
1776 * @param pVCpu The cross context virtual CPU structure of the
1777 * calling thread.
1778 * @param cbInstr The current instruction length.
1779 * @param uSel The selector.
1780 * @param enmBranch The kind of branching we're performing.
1781 * @param enmEffOpSize The effective operand size.
1782 * @param pDesc The descriptor corresponding to @a uSel.
1783 */
1784static VBOXSTRICTRC iemCImpl_BranchSysSel(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1785 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1786{
1787 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1788 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1789 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1790
1791 if (IEM_IS_LONG_MODE(pVCpu))
1792 switch (pDesc->Legacy.Gen.u4Type)
1793 {
1794 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1795 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1796
1797 default:
1798 case AMD64_SEL_TYPE_SYS_LDT:
1799 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1800 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1801 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1802 case AMD64_SEL_TYPE_SYS_INT_GATE:
1803 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1804 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1805 }
1806
1807 switch (pDesc->Legacy.Gen.u4Type)
1808 {
1809 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1810 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1811 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1812
1813 case X86_SEL_TYPE_SYS_TASK_GATE:
1814 return iemCImpl_BranchTaskGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1815
1816 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1817 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1818 return iemCImpl_BranchTaskSegment(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1819
1820 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1821 Log(("branch %04x -> busy 286 TSS\n", uSel));
1822 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1823
1824 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1825 Log(("branch %04x -> busy 386 TSS\n", uSel));
1826 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1827
1828 default:
1829 case X86_SEL_TYPE_SYS_LDT:
1830 case X86_SEL_TYPE_SYS_286_INT_GATE:
1831 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1832 case X86_SEL_TYPE_SYS_386_INT_GATE:
1833 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1834 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1835 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1836 }
1837}
1838
1839
1840/**
1841 * Implements far jumps.
1842 *
1843 * @param uSel The selector.
1844 * @param offSeg The segment offset.
1845 * @param enmEffOpSize The effective operand size.
1846 */
1847IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1848{
1849 NOREF(cbInstr);
1850 Assert(offSeg <= UINT32_MAX || (!IEM_IS_GUEST_CPU_AMD(pVCpu) && pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT));
1851
1852 /*
1853 * Real mode and V8086 mode are easy. The only snag seems to be that
1854 * CS.limit doesn't change and the limit check is done against the current
1855 * limit.
1856 */
1857 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1858 * 1998) that up to and including the Intel 486, far control
1859 * transfers in real mode set default CS attributes (0x93) and also
1860 * set a 64K segment limit. Starting with the Pentium, the
1861 * attributes and limit are left alone but the access rights are
1862 * ignored. We only implement the Pentium+ behavior.
1863 * */
1864 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1865 {
1866 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1867 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1868 {
1869 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1870 return iemRaiseGeneralProtectionFault0(pVCpu);
1871 }
1872
1873 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1874 pVCpu->cpum.GstCtx.rip = offSeg;
1875 else
1876 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1877 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1878 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1879 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1880 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1881
1882 return iemRegFinishClearingRF(pVCpu);
1883 }
1884
1885 /*
1886 * Protected mode. Need to parse the specified descriptor...
1887 */
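 /* The checks below are done in this order: null selector -> #GP(0), descriptor
    fetch, present, dispatch of system descriptors to iemCImpl_BranchSysSel, then
    code-segment type, L vs D and the conforming vs non-conforming DPL/RPL/CPL
    rules, and finally the limit check (skipped for 64-bit code) before committing. */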
1888 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1889 {
1890 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1891 return iemRaiseGeneralProtectionFault0(pVCpu);
1892 }
1893
1894 /* Fetch the descriptor. */
1895 IEMSELDESC Desc;
1896 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1897 if (rcStrict != VINF_SUCCESS)
1898 return rcStrict;
1899
1900 /* Is it there? */
1901 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1902 {
1903 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1904 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1905 }
1906
1907 /*
1908 * Deal with it according to its type. We do the standard code selectors
1909 * here and dispatch the system selectors to worker functions.
1910 */
1911 if (!Desc.Legacy.Gen.u1DescType)
1912 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1913
1914 /* Only code segments. */
1915 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1916 {
1917 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1918 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1919 }
1920
1921 /* L vs D. */
1922 if ( Desc.Legacy.Gen.u1Long
1923 && Desc.Legacy.Gen.u1DefBig
1924 && IEM_IS_LONG_MODE(pVCpu))
1925 {
1926 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1927 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1928 }
1929
1930 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1931 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1932 {
1933 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1934 {
1935 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1936 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1937 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1938 }
1939 }
1940 else
1941 {
1942 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1943 {
1944 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1945 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1946 }
1947 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1948 {
1949 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1950 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1951 }
1952 }
1953
1954 /* Chop the high bits if 16-bit (Intel says so). */
1955 if (enmEffOpSize == IEMMODE_16BIT)
1956 offSeg &= UINT16_MAX;
1957
1958 /* Limit check and get the base. */
1959 uint64_t u64Base;
1960 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1961 if ( !Desc.Legacy.Gen.u1Long
1962 || !IEM_IS_LONG_MODE(pVCpu))
1963 {
1964 if (RT_LIKELY(offSeg <= cbLimit))
1965 u64Base = X86DESC_BASE(&Desc.Legacy);
1966 else
1967 {
1968 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1969 /** @todo Intel says this is \#GP(0)! */
1970 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1971 }
1972 }
1973 else
1974 u64Base = 0;
1975
1976 /*
1977 * Ok, everything checked out fine. Now set the accessed bit before
1978 * committing the result into CS, CSHID and RIP.
1979 */
1980 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1981 {
1982 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1983 if (rcStrict != VINF_SUCCESS)
1984 return rcStrict;
1985 /** @todo check what VT-x and AMD-V does. */
1986 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1987 }
1988
1989 /* commit */
1990 pVCpu->cpum.GstCtx.rip = offSeg;
1991 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1992 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1993 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1994 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1995 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1996 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1997 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1998 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1999 /** @todo check if the hidden bits are loaded correctly for 64-bit
2000 * mode. */
2001
2002 /* Flush the prefetch buffer. */
2003 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2004
2005 return iemRegFinishClearingRF(pVCpu);
2006}
2007
2008
2009/**
2010 * Implements far calls.
2011 *
2012 * This is very similar to iemCImpl_FarJmp.
2013 *
2014 * @param uSel The selector.
2015 * @param offSeg The segment offset.
2016 * @param enmEffOpSize The operand size (in case we need it).
2017 */
2018IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
2019{
2020 VBOXSTRICTRC rcStrict;
2021 uint64_t uNewRsp;
2022 RTPTRUNION uPtrRet;
2023
2024 /*
2025 * Real mode and V8086 mode are easy. The only snag seems to be that
2026 * CS.limit doesn't change and the limit check is done against the current
2027 * limit.
2028 */
2029 /** @todo See comment for similar code in iemCImpl_FarJmp */
2030 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2031 {
2032 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2033
2034 /* Check stack first - may #SS(0). */
2035 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2036 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2037 &uPtrRet.pv, &uNewRsp);
2038 if (rcStrict != VINF_SUCCESS)
2039 return rcStrict;
2040
2041 /* Check the target address range. */
2042/** @todo this must be wrong! Write unreal mode tests! */
2043 if (offSeg > UINT32_MAX)
2044 return iemRaiseGeneralProtectionFault0(pVCpu);
2045
2046 /* Everything is fine, push the return address. */
2047 if (enmEffOpSize == IEMMODE_16BIT)
2048 {
2049 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2050 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2051 }
2052 else
2053 {
2054 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2055 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2056 }
2057 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060
2061 /* Branch. */
2062 pVCpu->cpum.GstCtx.rip = offSeg;
2063 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2064 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2065 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2066 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2067
2068 return iemRegFinishClearingRF(pVCpu);
2069 }
2070
2071 /*
2072 * Protected mode. Need to parse the specified descriptor...
2073 */
2074 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2075 {
2076 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2077 return iemRaiseGeneralProtectionFault0(pVCpu);
2078 }
2079
2080 /* Fetch the descriptor. */
2081 IEMSELDESC Desc;
2082 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2083 if (rcStrict != VINF_SUCCESS)
2084 return rcStrict;
2085
2086 /*
2087 * Deal with it according to its type. We do the standard code selectors
2088 * here and dispatch the system selectors to worker functions.
2089 */
2090 if (!Desc.Legacy.Gen.u1DescType)
2091 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2092
2093 /* Only code segments. */
2094 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2095 {
2096 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2097 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2098 }
2099
2100 /* L vs D. */
2101 if ( Desc.Legacy.Gen.u1Long
2102 && Desc.Legacy.Gen.u1DefBig
2103 && IEM_IS_LONG_MODE(pVCpu))
2104 {
2105 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2106 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2107 }
2108
2109 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
2110 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2111 {
2112 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2113 {
2114 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2115 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2116 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2117 }
2118 }
2119 else
2120 {
2121 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2122 {
2123 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2124 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2125 }
2126 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2127 {
2128 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2129 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2130 }
2131 }
2132
2133 /* Is it there? */
2134 if (!Desc.Legacy.Gen.u1Present)
2135 {
2136 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2137 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2138 }
2139
2140 /* Check stack first - may #SS(0). */
2141 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2142 * 16-bit code cause a two or four byte CS to be pushed? */
2143 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2144 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2145 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2146 &uPtrRet.pv, &uNewRsp);
2147 if (rcStrict != VINF_SUCCESS)
2148 return rcStrict;
2149
2150 /* Chop the high bits if 16-bit (Intel says so). */
2151 if (enmEffOpSize == IEMMODE_16BIT)
2152 offSeg &= UINT16_MAX;
2153
2154 /* Limit / canonical check. */
2155 uint64_t u64Base;
2156 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2157 if ( !Desc.Legacy.Gen.u1Long
2158 || !IEM_IS_LONG_MODE(pVCpu))
2159 {
2160 if (RT_LIKELY(offSeg <= cbLimit))
2161 u64Base = X86DESC_BASE(&Desc.Legacy);
2162 else
2163 {
2164 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2165 /** @todo Intel says this is \#GP(0)! */
2166 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2167 }
2168 }
2169 else if (IEM_IS_CANONICAL(offSeg))
2170 u64Base = 0;
2171 else
2172 {
2173 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2174 return iemRaiseNotCanonical(pVCpu);
2175 }
2176
2177 /*
2178 * Now set the accessed bit before
2179 * writing the return address to the stack and committing the result into
2180 * CS, CSHID and RIP.
2181 */
2182 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2183 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2184 {
2185 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2186 if (rcStrict != VINF_SUCCESS)
2187 return rcStrict;
2188 /** @todo check what VT-x and AMD-V does. */
2189 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2190 }
2191
2192 /* stack */
2193 if (enmEffOpSize == IEMMODE_16BIT)
2194 {
2195 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2196 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2197 }
2198 else if (enmEffOpSize == IEMMODE_32BIT)
2199 {
2200 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2201 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2202 }
2203 else
2204 {
2205 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2206 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2207 }
2208 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2209 if (rcStrict != VINF_SUCCESS)
2210 return rcStrict;
2211
2212 /* commit */
2213 pVCpu->cpum.GstCtx.rip = offSeg;
2214 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2215 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
2216 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2217 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2218 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2219 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2220 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2221 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2222 /** @todo check if the hidden bits are loaded correctly for 64-bit
2223 * mode. */
2224
2225 /* Flush the prefetch buffer. */
2226 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2227
2228 return iemRegFinishClearingRF(pVCpu);
2229}
2230
2231
2232/**
2233 * Implements retf.
2234 *
2235 * @param enmEffOpSize The effective operand size.
2236 * @param cbPop The amount of arguments to pop from the stack
2237 * (bytes).
2238 */
2239IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2240{
2241 VBOXSTRICTRC rcStrict;
2242 RTCPTRUNION uPtrFrame;
2243 RTUINT64U NewRsp;
2244 uint64_t uNewRip;
2245 uint16_t uNewCs;
2246 NOREF(cbInstr);
2247
2248 /*
2249 * Read the stack values first.
2250 */
2251 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2252 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2253 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2254 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2255 &uPtrFrame.pv, &NewRsp.u);
2256 if (rcStrict != VINF_SUCCESS)
2257 return rcStrict;
2258 if (enmEffOpSize == IEMMODE_16BIT)
2259 {
2260 uNewRip = uPtrFrame.pu16[0];
2261 uNewCs = uPtrFrame.pu16[1];
2262 }
2263 else if (enmEffOpSize == IEMMODE_32BIT)
2264 {
2265 uNewRip = uPtrFrame.pu32[0];
2266 uNewCs = uPtrFrame.pu16[2];
2267 }
2268 else
2269 {
2270 uNewRip = uPtrFrame.pu64[0];
2271 uNewCs = uPtrFrame.pu16[4];
2272 }
2273 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2274 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2275 { /* extremely likely */ }
2276 else
2277 return rcStrict;
2278
2279 /*
2280 * Real mode and V8086 mode are easy.
2281 */
2282 /** @todo See comment for similar code in iemCImpl_FarJmp */
2283 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2284 {
2285 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2286 /** @todo check how this is supposed to work if sp=0xfffe. */
2287
2288 /* Check the limit of the new EIP. */
2289 /** @todo Intel pseudo code only does the limit check for 16-bit
2290 * operands; AMD does not make any distinction. What is right? */
2291 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2292 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2293
2294 /* commit the operation. */
2295 if (cbPop)
2296 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2297 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2298 pVCpu->cpum.GstCtx.rip = uNewRip;
2299 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2300 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2301 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2302 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2303 return iemRegFinishClearingRF(pVCpu);
2304 }
2305
2306 /*
2307 * Protected mode is complicated, of course.
2308 */
2309 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2310 {
2311 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2312 return iemRaiseGeneralProtectionFault0(pVCpu);
2313 }
2314
2315 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2316
2317 /* Fetch the descriptor. */
2318 IEMSELDESC DescCs;
2319 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2320 if (rcStrict != VINF_SUCCESS)
2321 return rcStrict;
2322
2323 /* Can only return to a code selector. */
2324 if ( !DescCs.Legacy.Gen.u1DescType
2325 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2326 {
2327 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2328 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2329 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2330 }
2331
2332 /* L vs D. */
2333 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2334 && DescCs.Legacy.Gen.u1DefBig
2335 && IEM_IS_LONG_MODE(pVCpu))
2336 {
2337 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2338 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2339 }
2340
2341 /* DPL/RPL/CPL checks. */
2342 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2343 {
2344 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2345 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2346 }
2347
2348 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2349 {
2350 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2351 {
2352 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2353 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2354 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2355 }
2356 }
2357 else
2358 {
2359 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2360 {
2361 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2362 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2363 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2364 }
2365 }
2366
2367 /* Is it there? */
2368 if (!DescCs.Legacy.Gen.u1Present)
2369 {
2370 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2371 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2372 }
2373
2374 /*
2375 * Return to outer privilege? (We'll typically have entered via a call gate.)
2376 */
2377 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2378 {
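         /* The stack at this point, lowest address first: return IP, CS, cbPop bytes
            of parameters, then the outer (R)SP and SS - hence the cbPop offset when
            reading the outer stack pointer below. */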
2379 /* Read the outer stack pointer stored *after* the parameters. */
2380 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, NewRsp.u);
2381 if (rcStrict != VINF_SUCCESS)
2382 return rcStrict;
2383
2384 uint16_t uNewOuterSs;
2385 RTUINT64U NewOuterRsp;
2386 if (enmEffOpSize == IEMMODE_16BIT)
2387 {
2388 NewOuterRsp.u = uPtrFrame.pu16[0];
2389 uNewOuterSs = uPtrFrame.pu16[1];
2390 }
2391 else if (enmEffOpSize == IEMMODE_32BIT)
2392 {
2393 NewOuterRsp.u = uPtrFrame.pu32[0];
2394 uNewOuterSs = uPtrFrame.pu16[2];
2395 }
2396 else
2397 {
2398 NewOuterRsp.u = uPtrFrame.pu64[0];
2399 uNewOuterSs = uPtrFrame.pu16[4];
2400 }
2401 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2402 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2403 { /* extremely likely */ }
2404 else
2405 return rcStrict;
2406
2407 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2408 and read the selector. */
2409 IEMSELDESC DescSs;
2410 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2411 {
2412 if ( !DescCs.Legacy.Gen.u1Long
2413 || (uNewOuterSs & X86_SEL_RPL) == 3)
2414 {
2415 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2416 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2417 return iemRaiseGeneralProtectionFault0(pVCpu);
2418 }
2419 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2420 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2421 }
2422 else
2423 {
2424 /* Fetch the descriptor for the new stack segment. */
2425 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2426 if (rcStrict != VINF_SUCCESS)
2427 return rcStrict;
2428 }
2429
2430 /* Check that RPL of stack and code selectors match. */
2431 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2432 {
2433 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2434 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2435 }
2436
2437 /* Must be a writable data segment. */
2438 if ( !DescSs.Legacy.Gen.u1DescType
2439 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2440 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2441 {
2442 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2443 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2444 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2445 }
2446
2447 /* L vs D. (Not mentioned by intel.) */
2448 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2449 && DescSs.Legacy.Gen.u1DefBig
2450 && IEM_IS_LONG_MODE(pVCpu))
2451 {
2452 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2453 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2454 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2455 }
2456
2457 /* DPL/RPL/CPL checks. */
2458 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2459 {
2460 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2461 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2462 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2463 }
2464
2465 /* Is it there? */
2466 if (!DescSs.Legacy.Gen.u1Present)
2467 {
2468 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2469 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewOuterSs);
2470 }
2471
2472 /* Calc SS limit.*/
2473 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2474
2475 /* Is RIP canonical or within CS.limit? */
2476 uint64_t u64Base;
2477 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2478
2479 /** @todo Testcase: Is this correct? */
2480 if ( DescCs.Legacy.Gen.u1Long
2481 && IEM_IS_LONG_MODE(pVCpu) )
2482 {
2483 if (!IEM_IS_CANONICAL(uNewRip))
2484 {
2485 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2486 return iemRaiseNotCanonical(pVCpu);
2487 }
2488 u64Base = 0;
2489 }
2490 else
2491 {
2492 if (uNewRip > cbLimitCs)
2493 {
2494 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2495 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, cbLimitCs));
2496 /** @todo Intel says this is \#GP(0)! */
2497 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2498 }
2499 u64Base = X86DESC_BASE(&DescCs.Legacy);
2500 }
2501
2502 /*
2503 * Now set the accessed bits before
2504 * committing the result into CS, CSHID, RIP, SS and RSP. (This is a
2505 * return, so nothing gets written to the stack here.)
2506 */
2507 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2508 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2509 {
2510 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2511 if (rcStrict != VINF_SUCCESS)
2512 return rcStrict;
2513 /** @todo check what VT-x and AMD-V does. */
2514 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2515 }
2516 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2517 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2518 {
2519 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2520 if (rcStrict != VINF_SUCCESS)
2521 return rcStrict;
2522 /** @todo check what VT-x and AMD-V does. */
2523 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2524 }
2525
2526 /* commit */
2527 if (enmEffOpSize == IEMMODE_16BIT)
2528 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2529 else
2530 pVCpu->cpum.GstCtx.rip = uNewRip;
2531 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2532 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2533 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2534 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2535 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2536 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2537 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2538 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2539 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2540 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2541 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2542 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2543 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2544 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2545 else
2546 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2547 if (cbPop)
2548 iemRegAddToRspEx(pVCpu, &NewOuterRsp, cbPop);
2549 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2550 pVCpu->cpum.GstCtx.rsp = NewOuterRsp.u;
2551 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2552 pVCpu->cpum.GstCtx.rsp = (uint32_t)NewOuterRsp.u;
2553 else
2554 pVCpu->cpum.GstCtx.sp = (uint16_t)NewOuterRsp.u;
2555
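         /* Drop to the outer CPL and null any data segment registers whose DPL no
            longer permits access at the new, less privileged level. */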
2556 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2557 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2558 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2559 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2560 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2561
2562 /** @todo check if the hidden bits are loaded correctly for 64-bit
2563 * mode. */
2564 }
2565 /*
2566 * Return to the same privilege level
2567 */
2568 else
2569 {
2570 /* Limit / canonical check. */
2571 uint64_t u64Base;
2572 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2573
2574 /** @todo Testcase: Is this correct? */
2575 if ( DescCs.Legacy.Gen.u1Long
2576 && IEM_IS_LONG_MODE(pVCpu) )
2577 {
2578 if (!IEM_IS_CANONICAL(uNewRip))
2579 {
2580 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2581 return iemRaiseNotCanonical(pVCpu);
2582 }
2583 u64Base = 0;
2584 }
2585 else
2586 {
2587 if (uNewRip > cbLimitCs)
2588 {
2589 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2590 /** @todo Intel says this is \#GP(0)! */
2591 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2592 }
2593 u64Base = X86DESC_BASE(&DescCs.Legacy);
2594 }
2595
2596 /*
2597 * Now set the accessed bit before
2598 * committing the result into CS, CSHID and RIP. (This is a return, so
2599 * nothing gets written to the stack here.)
2600 */
2601 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2602 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2603 {
2604 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2605 if (rcStrict != VINF_SUCCESS)
2606 return rcStrict;
2607 /** @todo check what VT-x and AMD-V does. */
2608 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2609 }
2610
2611 /* commit */
2612 if (cbPop)
2613 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2614 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2615 pVCpu->cpum.GstCtx.sp = (uint16_t)NewRsp.u;
2616 else
2617 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2618 if (enmEffOpSize == IEMMODE_16BIT)
2619 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2620 else
2621 pVCpu->cpum.GstCtx.rip = uNewRip;
2622 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2623 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2624 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2625 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2626 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2627 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2628 /** @todo check if the hidden bits are loaded correctly for 64-bit
2629 * mode. */
2630 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2631 }
2632
2633 /* Flush the prefetch buffer. */
2634 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo use light flush for same privilege? */
2635
2636 return iemRegFinishClearingRF(pVCpu);
2637}
2638
2639
2640/**
2641 * Implements retn and retn imm16.
2642 *
2643 * We're doing this in C because of the \#GP that might be raised if the popped
2644 * program counter is out of bounds.
2645 *
2646 * The hope with this forced inline worker function is that the compiler will
2647 * be clever enough to eliminate unused code for the constant enmEffOpSize and
2648 * maybe cbPop parameters.
2649 *
2650 * @param pVCpu The cross context virtual CPU structure of the
2651 * calling thread.
2652 * @param cbInstr The current instruction length.
2653 * @param enmEffOpSize The effective operand size. This is constant.
2654 * @param cbPop The amount of arguments to pop from the stack
2655 * (bytes). This can be constant (zero).
2656 */
2657DECL_FORCE_INLINE(VBOXSTRICTRC) iemCImpl_ReturnNearCommon(PVMCPUCC pVCpu, uint8_t cbInstr, IEMMODE enmEffOpSize, uint16_t cbPop)
2658{
2659 /* Fetch the RSP from the stack. */
2660 VBOXSTRICTRC rcStrict;
2661 RTUINT64U NewRip;
2662 RTUINT64U NewRsp;
2663 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2664
2665 switch (enmEffOpSize)
2666 {
2667 case IEMMODE_16BIT:
2668 NewRip.u = 0;
2669 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2670 break;
2671 case IEMMODE_32BIT:
2672 NewRip.u = 0;
2673 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2674 break;
2675 case IEMMODE_64BIT:
2676 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2677 break;
2678 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2679 }
2680 if (rcStrict != VINF_SUCCESS)
2681 return rcStrict;
2682
2683 /* Check the new RIP before loading it. */
2684 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2685 * of it. The canonical test is performed here and for call. */
2686 if (enmEffOpSize != IEMMODE_64BIT)
2687 {
2688 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))
2689 { /* likely */ }
2690 else
2691 {
2692 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2693 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2694 }
2695 }
2696 else
2697 {
2698 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))
2699 { /* likely */ }
2700 else
2701 {
2702 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2703 return iemRaiseNotCanonical(pVCpu);
2704 }
2705 }
2706
2707 /* Apply cbPop */
2708 if (cbPop)
2709 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2710
2711 /* Commit it. */
2712 pVCpu->cpum.GstCtx.rip = NewRip.u;
2713 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2714
2715 /* Flush the prefetch buffer. */
2716 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo only need a light flush here, don't we? We don't really need any flushing... */
2717 RT_NOREF(cbInstr);
2718
2719 return iemRegFinishClearingRF(pVCpu);
2720}
2721
2722
2723/**
2724 * Implements retn imm16 with 16-bit effective operand size.
2725 *
2726 * @param cbPop The amount of arguments to pop from the stack (bytes).
2727 */
2728IEM_CIMPL_DEF_1(iemCImpl_retn_iw_16, uint16_t, cbPop)
2729{
2730 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, cbPop);
2731}
2732
2733
2734/**
2735 * Implements retn imm16 with 32-bit effective operand size.
2736 *
2737 * @param cbPop The amount of arguments to pop from the stack (bytes).
2738 */
2739IEM_CIMPL_DEF_1(iemCImpl_retn_iw_32, uint16_t, cbPop)
2740{
2741 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, cbPop);
2742}
2743
2744
2745/**
2746 * Implements retn imm16 with 64-bit effective operand size.
2747 *
2748 * @param cbPop The amount of arguments to pop from the stack (bytes).
2749 */
2750IEM_CIMPL_DEF_1(iemCImpl_retn_iw_64, uint16_t, cbPop)
2751{
2752 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, cbPop);
2753}
2754
2755
2756/**
2757 * Implements retn with 16-bit effective operand size.
2758 */
2759IEM_CIMPL_DEF_0(iemCImpl_retn_16)
2760{
2761 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, 0);
2762}
2763
2764
2765/**
2766 * Implements retn with 32-bit effective operand size.
2767 */
2768IEM_CIMPL_DEF_0(iemCImpl_retn_32)
2769{
2770 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, 0);
2771}
2772
2773
2774/**
2775 * Implements retn with 64-bit effective operand size.
2776 */
2777IEM_CIMPL_DEF_0(iemCImpl_retn_64)
2778{
2779 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, 0);
2780}
2781
2782
2783/**
2784 * Implements enter.
2785 *
2786 * We're doing this in C because the instruction is insane; even for the
2787 * u8NestingLevel=0 case, dealing with the stack is tedious.
2788 *
2789 * @param enmEffOpSize The effective operand size.
2790 * @param cbFrame Frame size.
2791 * @param cParameters Frame parameter count.
2792 */
2793IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2794{
2795 /* Push RBP, saving the old value in TmpRbp. */
2796 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2797 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2798 RTUINT64U NewRbp;
2799 VBOXSTRICTRC rcStrict;
2800 if (enmEffOpSize == IEMMODE_64BIT)
2801 {
2802 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2803 NewRbp = NewRsp;
2804 }
2805 else if (enmEffOpSize == IEMMODE_32BIT)
2806 {
2807 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2808 NewRbp = NewRsp;
2809 }
2810 else
2811 {
2812 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2813 NewRbp = TmpRbp;
2814 NewRbp.Words.w0 = NewRsp.Words.w0;
2815 }
2816 if (rcStrict != VINF_SUCCESS)
2817 return rcStrict;
2818
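     /* The low 5 bits of the third ENTER operand give the nesting level. A non-zero
        level makes ENTER copy frame pointers from the enclosing frame (the "display")
        so that nested procedures can address the frames of their enclosing procedures. */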
2819 /* Copy the parameters (aka nesting levels by Intel). */
2820 cParameters &= 0x1f;
2821 if (cParameters > 0)
2822 {
2823 switch (enmEffOpSize)
2824 {
2825 case IEMMODE_16BIT:
2826 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2827 TmpRbp.DWords.dw0 -= 2;
2828 else
2829 TmpRbp.Words.w0 -= 2;
2830 do
2831 {
2832 uint16_t u16Tmp;
2833 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2834 if (rcStrict != VINF_SUCCESS)
2835 break;
2836 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2837 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2838 break;
2839
2840 case IEMMODE_32BIT:
2841 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2842 TmpRbp.DWords.dw0 -= 4;
2843 else
2844 TmpRbp.Words.w0 -= 4;
2845 do
2846 {
2847 uint32_t u32Tmp;
2848 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2849 if (rcStrict != VINF_SUCCESS)
2850 break;
2851 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2852 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2853 break;
2854
2855 case IEMMODE_64BIT:
2856 TmpRbp.u -= 8;
2857 do
2858 {
2859 uint64_t u64Tmp;
2860 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2861 if (rcStrict != VINF_SUCCESS)
2862 break;
2863 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2864 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2865 break;
2866
2867 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2868 }
2869 if (rcStrict != VINF_SUCCESS)
2870 return rcStrict;
2871
2872 /* Push the new RBP */
2873 if (enmEffOpSize == IEMMODE_64BIT)
2874 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2875 else if (enmEffOpSize == IEMMODE_32BIT)
2876 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2877 else
2878 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2879 if (rcStrict != VINF_SUCCESS)
2880 return rcStrict;
2881
2882 }
2883
2884 /* Recalc RSP. */
2885 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2886
2887 /** @todo Should probe write access at the new RSP according to AMD. */
2888 /** @todo Should handle accesses to the VMX APIC-access page. */
2889
2890 /* Commit it. */
2891 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2892 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2893 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2894}
2895
2896
2897
2898/**
2899 * Implements leave.
2900 *
2901 * We're doing this in C because messing with the stack registers is annoying
2902 * since they depend on SS attributes.
2903 *
2904 * @param enmEffOpSize The effective operand size.
2905 */
2906IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2907{
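     /* LEAVE is effectively: (R|E)SP := (R|E)BP (within the SS address width),
        followed by a pop into (R|E)BP. */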
2908 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2909 RTUINT64U NewRsp;
2910 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2911 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2912 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2913 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2914 else
2915 {
2916 /** @todo Check that LEAVE actually preserves the high EBP bits. */
2917 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2918 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2919 }
2920
2921 /* Pop RBP according to the operand size. */
2922 VBOXSTRICTRC rcStrict;
2923 RTUINT64U NewRbp;
2924 switch (enmEffOpSize)
2925 {
2926 case IEMMODE_16BIT:
2927 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2928 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2929 break;
2930 case IEMMODE_32BIT:
2931 NewRbp.u = 0;
2932 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2933 break;
2934 case IEMMODE_64BIT:
2935 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2936 break;
2937 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2938 }
2939 if (rcStrict != VINF_SUCCESS)
2940 return rcStrict;
2941
2942
2943 /* Commit it. */
2944 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2945 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2946 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2947}
2948
2949
2950/**
2951 * Implements int3 and int XX.
2952 *
2953 * @param u8Int The interrupt vector number.
2954 * @param enmInt The int instruction type.
2955 */
2956IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2957{
2958 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2959
2960 /*
2961 * We must check if this INT3 might belong to DBGF before raising a #BP.
2962 */
2963 if (u8Int == 3)
2964 {
2965 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2966 if (pVM->dbgf.ro.cEnabledInt3Breakpoints == 0)
2967 { /* likely: No vbox debugger breakpoints */ }
2968 else
2969 {
2970 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx);
2971 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2972 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
2973 return iemSetPassUpStatus(pVCpu, rcStrict);
2974 }
2975 }
2976/** @todo single stepping */
2977 return iemRaiseXcptOrInt(pVCpu,
2978 cbInstr,
2979 u8Int,
2980 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2981 0,
2982 0);
2983}
2984
2985
2986/**
2987 * Implements iret for real mode and V8086 mode.
2988 *
2989 * @param enmEffOpSize The effective operand size.
2990 */
2991IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2992{
2993 X86EFLAGS Efl;
2994 Efl.u = IEMMISC_GET_EFL(pVCpu);
2995 NOREF(cbInstr);
2996
2997 /*
2998 * In V8086 mode with IOPL != 3, iret raises #GP(0) unless CR4.VME is enabled.
2999 */
3000 if ( Efl.Bits.u1VM
3001 && Efl.Bits.u2IOPL != 3
3002 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
3003 return iemRaiseGeneralProtectionFault0(pVCpu);
3004
3005 /*
3006 * Do the stack bits, but don't commit RSP before everything checks
3007 * out right.
3008 */
3009 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
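     /* The frame popped below is IP, CS, FLAGS (three words) for a 16-bit operand
        size and EIP, CS, EFLAGS (three dwords) for a 32-bit operand size. */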
3010 VBOXSTRICTRC rcStrict;
3011 RTCPTRUNION uFrame;
3012 uint16_t uNewCs;
3013 uint32_t uNewEip;
3014 uint32_t uNewFlags;
3015 uint64_t uNewRsp;
3016 if (enmEffOpSize == IEMMODE_32BIT)
3017 {
3018 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
3019 if (rcStrict != VINF_SUCCESS)
3020 return rcStrict;
3021 uNewEip = uFrame.pu32[0];
3022 if (uNewEip > UINT16_MAX)
3023 return iemRaiseGeneralProtectionFault0(pVCpu);
3024
3025 uNewCs = (uint16_t)uFrame.pu32[1];
3026 uNewFlags = uFrame.pu32[2];
3027 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3028 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
3029 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
3030 | X86_EFL_ID;
3031 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3032 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3033 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
3034 }
3035 else
3036 {
3037 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3038 if (rcStrict != VINF_SUCCESS)
3039 return rcStrict;
3040 uNewEip = uFrame.pu16[0];
3041 uNewCs = uFrame.pu16[1];
3042 uNewFlags = uFrame.pu16[2];
3043 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3044 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
3045 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
3046 /** @todo The intel pseudo code does not indicate what happens to
3047 * reserved flags. We just ignore them. */
3048 /* Ancient CPU adjustments: See iemCImpl_popf. */
3049 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
3050 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
3051 }
3052 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
3053 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3054 { /* extremely likely */ }
3055 else
3056 return rcStrict;
3057
3058 /** @todo Check how this is supposed to work if sp=0xfffe. */
3059 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
3060 uNewCs, uNewEip, uNewFlags, uNewRsp));
3061
3062 /*
3063 * Check the limit of the new EIP.
3064 */
3065 /** @todo Only the AMD pseudo code checks the limit here, what's
3066 * right? */
3067 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
3068 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3069
3070 /*
3071 * V8086 checks and flag adjustments
3072 */
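     /* With IOPL=3 the VM86 task may toggle IF directly; IOPL itself is preserved
        and RF cleared. Otherwise we only get here with CR4.VME set: for a 16-bit
        frame the popped IF is routed to VIF while the real IF and IOPL are kept,
        unless TF is set or IF is being set while VIP is pending, which is #GP(0). */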
3073 if (Efl.Bits.u1VM)
3074 {
3075 if (Efl.Bits.u2IOPL == 3)
3076 {
3077 /* Preserve IOPL and clear RF. */
3078 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
3079 uNewFlags |= Efl.u & (X86_EFL_IOPL);
3080 }
3081 else if ( enmEffOpSize == IEMMODE_16BIT
3082 && ( !(uNewFlags & X86_EFL_IF)
3083 || !Efl.Bits.u1VIP )
3084 && !(uNewFlags & X86_EFL_TF) )
3085 {
3086 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
3087 uNewFlags &= ~X86_EFL_VIF;
3088 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
3089 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
3090 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
3091 }
3092 else
3093 return iemRaiseGeneralProtectionFault0(pVCpu);
3094 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
3095 }
3096
3097 /*
3098 * Commit the operation.
3099 */
3100#ifdef DBGFTRACE_ENABLED
3101 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3102 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3103#endif
3104 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3105 pVCpu->cpum.GstCtx.rip = uNewEip;
3106 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3107 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3108 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3109 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
3110 /** @todo do we load attribs and limit as well? */
3111 Assert(uNewFlags & X86_EFL_1);
3112 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3113
3114 /* Flush the prefetch buffer. */
3115 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo can do light flush in real mode at least */
3116
3117/** @todo single stepping */
3118 return VINF_SUCCESS;
3119}
3120
3121
3122/**
3123 * Loads a segment register when entering V8086 mode.
3124 *
3125 * @param pSReg The segment register.
3126 * @param uSeg The segment to load.
3127 */
3128static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3129{
3130 pSReg->Sel = uSeg;
3131 pSReg->ValidSel = uSeg;
3132 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3133 pSReg->u64Base = (uint32_t)uSeg << 4;
3134 pSReg->u32Limit = 0xffff;
3135 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3136 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3137 * IRET'ing to V8086. */
3138}
3139
3140
3141/**
3142 * Implements iret for protected mode returning to V8086 mode.
3143 *
3144 * @param uNewEip The new EIP.
3145 * @param uNewCs The new CS.
3146 * @param uNewFlags The new EFLAGS.
3147 * @param uNewRsp The RSP after the initial IRET frame.
3148 *
3149 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3150 */
3151IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
3152{
3153 RT_NOREF_PV(cbInstr);
3154 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
3155
3156 /*
3157 * Pop the V8086 specific frame bits off the stack.
3158 */
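     /* The caller already consumed EIP, CS and EFLAGS; the rest of the 32-bit frame
        holds ESP, SS, ES, DS, FS and GS, one dword each (selectors in the low words). */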
3159 VBOXSTRICTRC rcStrict;
3160 RTCPTRUNION uFrame;
3161 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);
3162 if (rcStrict != VINF_SUCCESS)
3163 return rcStrict;
3164 uint32_t uNewEsp = uFrame.pu32[0];
3165 uint16_t uNewSs = uFrame.pu32[1];
3166 uint16_t uNewEs = uFrame.pu32[2];
3167 uint16_t uNewDs = uFrame.pu32[3];
3168 uint16_t uNewFs = uFrame.pu32[4];
3169 uint16_t uNewGs = uFrame.pu32[5];
3170 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3171 if (rcStrict != VINF_SUCCESS)
3172 return rcStrict;
3173
3174 /*
3175 * Commit the operation.
3176 */
3177 uNewFlags &= X86_EFL_LIVE_MASK;
3178 uNewFlags |= X86_EFL_RA1_MASK;
3179#ifdef DBGFTRACE_ENABLED
3180 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3181 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3182#endif
3183 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3184
3185 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3186 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3187 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3188 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3189 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3190 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3191 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3192 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3193 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3194 pVCpu->iem.s.uCpl = 3;
3195
3196 /* Flush the prefetch buffer. */
3197 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3198
3199/** @todo single stepping */
3200 return VINF_SUCCESS;
3201}
3202
3203
3204/**
3205 * Implements iret for protected mode returning via a nested task.
3206 *
3207 * @param enmEffOpSize The effective operand size.
3208 */
3209IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3210{
3211 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3212#ifndef IEM_IMPLEMENTS_TASKSWITCH
3213 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3214#else
3215 RT_NOREF_PV(enmEffOpSize);
3216
3217 /*
3218 * Read the segment selector in the link-field of the current TSS.
3219 */
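 /* The previous task link is the very first word of the TSS (both 286 and
  * 386 formats), so it is fetched from offset 0 relative to TR.base. */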
3220 RTSEL uSelRet;
3221 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224
3225 /*
3226 * Fetch the returning task's TSS descriptor from the GDT.
3227 */
3228 if (uSelRet & X86_SEL_LDT)
3229 {
3230 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3231 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3232 }
3233
3234 IEMSELDESC TssDesc;
3235 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3236 if (rcStrict != VINF_SUCCESS)
3237 return rcStrict;
3238
3239 if (TssDesc.Legacy.Gate.u1DescType)
3240 {
3241 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3242 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3243 }
3244
3245 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3246 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3247 {
3248 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3249 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3250 }
3251
3252 if (!TssDesc.Legacy.Gate.u1Present)
3253 {
3254 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3255 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3256 }
3257
3258 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3259 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3260 0 /* uCr2 */, uSelRet, &TssDesc);
3261#endif
3262}
3263
3264
3265/**
3266 * Implements iret for protected mode.
3267 *
3268 * @param enmEffOpSize The effective operand size.
3269 */
3270IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3271{
3272 NOREF(cbInstr);
3273 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3274
3275 /*
3276 * Nested task return.
3277 */
3278 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3279 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3280
3281 /*
3282 * Normal return.
3283 *
3284 * Do the stack bits, but don't commit RSP before everything checks
3285 * out right.
3286 */
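 /* The basic IRET frame consists of EIP, CS and EFLAGS (dwords for a 32-bit
  * operand size, words for 16-bit). When returning to an outer privilege
  * level, ESP and SS follow on the stack and are fetched further down. */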
3287 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3288 VBOXSTRICTRC rcStrict;
3289 RTCPTRUNION uFrame;
3290 uint16_t uNewCs;
3291 uint32_t uNewEip;
3292 uint32_t uNewFlags;
3293 uint64_t uNewRsp;
3294 if (enmEffOpSize == IEMMODE_32BIT)
3295 {
3296 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
3297 if (rcStrict != VINF_SUCCESS)
3298 return rcStrict;
3299 uNewEip = uFrame.pu32[0];
3300 uNewCs = (uint16_t)uFrame.pu32[1];
3301 uNewFlags = uFrame.pu32[2];
3302 }
3303 else
3304 {
3305 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3306 if (rcStrict != VINF_SUCCESS)
3307 return rcStrict;
3308 uNewEip = uFrame.pu16[0];
3309 uNewCs = uFrame.pu16[1];
3310 uNewFlags = uFrame.pu16[2];
3311 }
3312 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3313 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3314 { /* extremely likely */ }
3315 else
3316 return rcStrict;
3317 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3318
3319 /*
3320 * We're hopefully not returning to V8086 mode...
3321 */
3322 if ( (uNewFlags & X86_EFL_VM)
3323 && pVCpu->iem.s.uCpl == 0)
3324 {
3325 Assert(enmEffOpSize == IEMMODE_32BIT);
3326 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3327 }
3328
3329 /*
3330 * Protected mode.
3331 */
3332 /* Read the CS descriptor. */
3333 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3334 {
3335 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3336 return iemRaiseGeneralProtectionFault0(pVCpu);
3337 }
3338
3339 IEMSELDESC DescCS;
3340 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3341 if (rcStrict != VINF_SUCCESS)
3342 {
3343 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3344 return rcStrict;
3345 }
3346
3347 /* Must be a code descriptor. */
3348 if (!DescCS.Legacy.Gen.u1DescType)
3349 {
3350 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3351 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3352 }
3353 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3354 {
3355 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3356 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3357 }
3358
3359 /* Privilege checks. */
3360 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3361 {
3362 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3363 {
3364 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3365 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3366 }
3367 }
3368 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3369 {
3370 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3371 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3372 }
3373 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3374 {
3375 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3376 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3377 }
3378
3379 /* Present? */
3380 if (!DescCS.Legacy.Gen.u1Present)
3381 {
3382 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3383 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3384 }
3385
3386 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3387
3388 /*
3389 * Return to outer level?
3390 */
3391 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3392 {
3393 uint16_t uNewSS;
3394 uint32_t uNewESP;
3395 if (enmEffOpSize == IEMMODE_32BIT)
3396 {
3397 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);
3398 if (rcStrict != VINF_SUCCESS)
3399 return rcStrict;
3400/** @todo We might be popping a 32-bit ESP from the IRET frame, but it turns
3401 * out that whether 16 or 32 bits get loaded into SP depends on the D/B
3402 * bit of the popped SS selector. */
3403 uNewESP = uFrame.pu32[0];
3404 uNewSS = (uint16_t)uFrame.pu32[1];
3405 }
3406 else
3407 {
3408 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);
3409 if (rcStrict != VINF_SUCCESS)
3410 return rcStrict;
3411 uNewESP = uFrame.pu16[0];
3412 uNewSS = uFrame.pu16[1];
3413 }
3414 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3415 if (rcStrict != VINF_SUCCESS)
3416 return rcStrict;
3417 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3418
3419 /* Read the SS descriptor. */
3420 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3421 {
3422 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3423 return iemRaiseGeneralProtectionFault0(pVCpu);
3424 }
3425
3426 IEMSELDESC DescSS;
3427 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3428 if (rcStrict != VINF_SUCCESS)
3429 {
3430 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3431 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3432 return rcStrict;
3433 }
3434
3435 /* Privilege checks. */
3436 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3437 {
3438 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3439 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3440 }
3441 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3442 {
3443 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3444 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3445 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3446 }
3447
3448 /* Must be a writeable data segment descriptor. */
3449 if (!DescSS.Legacy.Gen.u1DescType)
3450 {
3451 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3452 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3453 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3454 }
3455 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3456 {
3457 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3458 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3459 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3460 }
3461
3462 /* Present? */
3463 if (!DescSS.Legacy.Gen.u1Present)
3464 {
3465 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3466 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3467 }
3468
3469 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3470
3471 /* Check EIP. */
3472 if (uNewEip > cbLimitCS)
3473 {
3474 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3475 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3476 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3477 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3478 }
3479
3480 /*
3481 * Commit the changes, marking CS and SS accessed first since
3482 * that may fail.
3483 */
3484 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3485 {
3486 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3490 }
3491 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3492 {
3493 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3494 if (rcStrict != VINF_SUCCESS)
3495 return rcStrict;
3496 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3497 }
3498
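 /* Build the mask of EFLAGS bits IRET may restore at this privilege level:
  * the arithmetic flags plus TF, DF and NT always; RF, AC and ID only with
  * a 32-bit operand size; IF only when CPL <= IOPL; IOPL, VIF and VIP only
  * at CPL 0; targets up to and including the 386 drop the flags they do
  * not implement. */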
3499 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3500 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3501 if (enmEffOpSize != IEMMODE_16BIT)
3502 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3503 if (pVCpu->iem.s.uCpl == 0)
3504 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3505 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3506 fEFlagsMask |= X86_EFL_IF;
3507 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3508 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3509 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3510 fEFlagsNew &= ~fEFlagsMask;
3511 fEFlagsNew |= uNewFlags & fEFlagsMask;
3512#ifdef DBGFTRACE_ENABLED
3513 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3514 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3515 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3516#endif
3517
3518 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3519 pVCpu->cpum.GstCtx.rip = uNewEip;
3520 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3521 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3522 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3523 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3524 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3525 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3526 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3527
3528 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3529 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3530 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3531 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3532 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3533 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3534 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3535 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3536 else
3537 pVCpu->cpum.GstCtx.rsp = uNewESP;
3538
3539 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3540 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3541 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3542 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3543 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3544
3545 /* Done! */
3546
3547 }
3548 /*
3549 * Return to the same level.
3550 */
3551 else
3552 {
3553 /* Check EIP. */
3554 if (uNewEip > cbLimitCS)
3555 {
3556 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3557 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3558 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3559 }
3560
3561 /*
3562 * Commit the changes, marking CS first since it may fail.
3563 */
3564 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3565 {
3566 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3567 if (rcStrict != VINF_SUCCESS)
3568 return rcStrict;
3569 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3570 }
3571
3572 X86EFLAGS NewEfl;
3573 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3574 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3575 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3576 if (enmEffOpSize != IEMMODE_16BIT)
3577 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3578 if (pVCpu->iem.s.uCpl == 0)
3579 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3580 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3581 fEFlagsMask |= X86_EFL_IF;
3582 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3583 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3584 NewEfl.u &= ~fEFlagsMask;
3585 NewEfl.u |= fEFlagsMask & uNewFlags;
3586#ifdef DBGFTRACE_ENABLED
3587 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3588 pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3589 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3590#endif
3591
3592 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3593 pVCpu->cpum.GstCtx.rip = uNewEip;
3594 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3595 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3596 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3597 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3598 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3599 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3600 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3601 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3602 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3603 else
3604 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3605 /* Done! */
3606 }
3607
3608 /* Flush the prefetch buffer. */
3609 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if same ring? */
3610
3611/** @todo single stepping */
3612 return VINF_SUCCESS;
3613}
3614
3615
3616/**
3617 * Implements iret for long mode.
3618 *
3619 * @param enmEffOpSize The effective operand size.
3620 */
3621IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3622{
3623 NOREF(cbInstr);
3624
3625 /*
3626 * Nested task return is not supported in long mode.
3627 */
3628 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3629 {
3630 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3631 return iemRaiseGeneralProtectionFault0(pVCpu);
3632 }
3633
3634 /*
3635 * Normal return.
3636 *
3637 * Do the stack bits, but don't commit RSP before everything checks
3638 * out right.
3639 */
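 /* The long mode IRET frame always has five slots: RIP, CS, RFLAGS, RSP and
  * SS, each 8, 4 or 2 bytes wide depending on the effective operand size. */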
3640 VBOXSTRICTRC rcStrict;
3641 RTCPTRUNION uFrame;
3642 uint64_t uNewRip;
3643 uint16_t uNewCs;
3644 uint16_t uNewSs;
3645 uint32_t uNewFlags;
3646 uint64_t uNewRsp;
3647 if (enmEffOpSize == IEMMODE_64BIT)
3648 {
3649 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
3650 if (rcStrict != VINF_SUCCESS)
3651 return rcStrict;
3652 uNewRip = uFrame.pu64[0];
3653 uNewCs = (uint16_t)uFrame.pu64[1];
3654 uNewFlags = (uint32_t)uFrame.pu64[2];
3655 uNewRsp = uFrame.pu64[3];
3656 uNewSs = (uint16_t)uFrame.pu64[4];
3657 }
3658 else if (enmEffOpSize == IEMMODE_32BIT)
3659 {
3660 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
3661 if (rcStrict != VINF_SUCCESS)
3662 return rcStrict;
3663 uNewRip = uFrame.pu32[0];
3664 uNewCs = (uint16_t)uFrame.pu32[1];
3665 uNewFlags = uFrame.pu32[2];
3666 uNewRsp = uFrame.pu32[3];
3667 uNewSs = (uint16_t)uFrame.pu32[4];
3668 }
3669 else
3670 {
3671 Assert(enmEffOpSize == IEMMODE_16BIT);
3672 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
3673 if (rcStrict != VINF_SUCCESS)
3674 return rcStrict;
3675 uNewRip = uFrame.pu16[0];
3676 uNewCs = uFrame.pu16[1];
3677 uNewFlags = uFrame.pu16[2];
3678 uNewRsp = uFrame.pu16[3];
3679 uNewSs = uFrame.pu16[4];
3680 }
3681 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3682 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3683 { /* extremely likely */ }
3684 else
3685 return rcStrict;
3686 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3687
3688 /*
3689 * Check stuff.
3690 */
3691 /* Read the CS descriptor. */
3692 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3693 {
3694 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3695 return iemRaiseGeneralProtectionFault0(pVCpu);
3696 }
3697
3698 IEMSELDESC DescCS;
3699 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3700 if (rcStrict != VINF_SUCCESS)
3701 {
3702 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3703 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3704 return rcStrict;
3705 }
3706
3707 /* Must be a code descriptor. */
3708 if ( !DescCS.Legacy.Gen.u1DescType
3709 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3710 {
3711 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
3712 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3713 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3714 }
3715
3716 /* Privilege checks. */
3717 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3718 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3719 {
3720 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3721 {
3722 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3723 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3724 }
3725 }
3726 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3727 {
3728 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3729 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3730 }
3731 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3732 {
3733 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3734 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3735 }
3736
3737 /* Present? */
3738 if (!DescCS.Legacy.Gen.u1Present)
3739 {
3740 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3741 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3742 }
3743
3744 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3745
3746 /* Read the SS descriptor. */
3747 IEMSELDESC DescSS;
3748 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3749 {
3750 if ( !DescCS.Legacy.Gen.u1Long
3751 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3752 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3753 {
3754 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3755 return iemRaiseGeneralProtectionFault0(pVCpu);
3756 }
3757 /* Make sure SS is sensible, marked as accessed etc. */
3758 iemMemFakeStackSelDesc(&DescSS, (uNewSs & X86_SEL_RPL));
3759 }
3760 else
3761 {
3762 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3763 if (rcStrict != VINF_SUCCESS)
3764 {
3765 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3766 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3767 return rcStrict;
3768 }
3769 }
3770
3771 /* Privilege checks. */
3772 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3773 {
3774 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3775 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3776 }
3777
3778 uint32_t cbLimitSs;
3779 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3780 cbLimitSs = UINT32_MAX;
3781 else
3782 {
3783 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3784 {
3785 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3786 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3787 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3788 }
3789
3790 /* Must be a writeable data segment descriptor. */
3791 if (!DescSS.Legacy.Gen.u1DescType)
3792 {
3793 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3794 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3795 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3796 }
3797 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3798 {
3799 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3800 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3801 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3802 }
3803
3804 /* Present? */
3805 if (!DescSS.Legacy.Gen.u1Present)
3806 {
3807 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3808 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3809 }
3810 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3811 }
3812
3813 /* Check EIP. */
3814 if (DescCS.Legacy.Gen.u1Long)
3815 {
3816 if (!IEM_IS_CANONICAL(uNewRip))
3817 {
3818 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3819 uNewCs, uNewRip, uNewSs, uNewRsp));
3820 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3821 }
3822 }
3823 else
3824 {
3825 if (uNewRip > cbLimitCS)
3826 {
3827 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3828 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3829 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3830 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3831 }
3832 }
3833
3834 /*
3835 * Commit the changes, marking CS and SS accessed first since
3836 * that may fail.
3837 */
3838 /** @todo where exactly are these actually marked accessed by a real CPU? */
3839 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3840 {
3841 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3842 if (rcStrict != VINF_SUCCESS)
3843 return rcStrict;
3844 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3845 }
3846 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3847 {
3848 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3849 if (rcStrict != VINF_SUCCESS)
3850 return rcStrict;
3851 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3852 }
3853
3854 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3855 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3856 if (enmEffOpSize != IEMMODE_16BIT)
3857 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3858 if (pVCpu->iem.s.uCpl == 0)
3859 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3860 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3861 fEFlagsMask |= X86_EFL_IF;
3862 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3863 fEFlagsNew &= ~fEFlagsMask;
3864 fEFlagsNew |= uNewFlags & fEFlagsMask;
3865#ifdef DBGFTRACE_ENABLED
3866 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3867 pVCpu->iem.s.uCpl, uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3868#endif
3869
3870 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3871 pVCpu->cpum.GstCtx.rip = uNewRip;
3872 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3873 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3874 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3875 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3876 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3877 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3878 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3879 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3880 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3881 else
3882 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3883 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3884 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3885 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3886 {
3887 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3888 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3889 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3890 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3891 Log2(("iretq new SS: NULL\n"));
3892 }
3893 else
3894 {
3895 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3896 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3897 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3898 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3899 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3900 }
3901
3902 if (pVCpu->iem.s.uCpl != uNewCpl)
3903 {
3904 pVCpu->iem.s.uCpl = uNewCpl;
3905 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3906 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3907 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3908 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3909 }
3910
3911 /* Flush the prefetch buffer. */
3912 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if the ring + mode doesn't change */
3913
3914/** @todo single stepping */
3915 return VINF_SUCCESS;
3916}
3917
3918
3919/**
3920 * Implements iret.
3921 *
3922 * @param enmEffOpSize The effective operand size.
3923 */
3924IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3925{
3926 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
3927
3928#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3929 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3930 {
3931 /*
3932 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3933 * of this IRET instruction. We need to provide this information as part of some
3934 * VM-exits.
3935 *
3936 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3937 */
3938 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3939 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3940 else
3941 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3942
3943 /*
3944 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3945 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3946 */
3947 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3948 fBlockingNmi = false;
3949
3950 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3951 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3952 }
3953#endif
3954
3955 /*
3956 * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3957 * The NMI is still held pending (which I assume means blocking of further NMIs
3958 * is in effect).
3959 *
3960 * See AMD spec. 15.9 "Instruction Intercepts".
3961 * See AMD spec. 15.21.9 "NMI Support".
3962 */
3963 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3964 {
3965 Log(("iret: Guest intercept -> #VMEXIT\n"));
3966 IEM_SVM_UPDATE_NRIP(pVCpu);
3967 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3968 }
3969
3970 /*
3971 * Clear NMI blocking, if any, before causing any further exceptions.
3972 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3973 */
3974 if (fBlockingNmi)
3975 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3976
3977 /*
3978 * Call a mode specific worker.
3979 */
3980 VBOXSTRICTRC rcStrict;
3981 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3982 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3983 else
3984 {
3985 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3986 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3987 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3988 else
3989 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3990 }
3991
3992#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3993 /*
3994 * Clear NMI unblocking IRET state with the completion of IRET.
3995 */
3996 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3997 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = false;
3998#endif
3999 return rcStrict;
4000}
4001
4002
4003static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4004{
4005 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4006
4007 pHid->Sel = uSel;
4008 pHid->ValidSel = uSel;
4009 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4010}
4011
4012
4013static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
4014{
4015 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4016
4017 /* The base is in the first three bytes. */
4018 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
4019 /* The attributes are in the fourth byte. */
4020 pHid->Attr.u = pbMem[3];
4021 /* The limit is in the last two bytes. */
4022 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
4023}
4024
4025
4026/**
4027 * Implements 286 LOADALL (286 CPUs only).
4028 */
4029IEM_CIMPL_DEF_0(iemCImpl_loadall286)
4030{
4031 NOREF(cbInstr);
4032
4033 /* Data is loaded from a buffer at 800h. No checks are done on the
4034 * validity of loaded state.
4035 *
4036 * LOADALL only loads the internal CPU state, it does not access any
4037 * GDT, LDT, or similar tables.
4038 */
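 /* Rough layout of the 0x66 byte buffer at 800h as consumed below (offsets
  * in hex):
  *   06 MSW
  *   16 TR selector, followed by FLAGS (18) and IP (1A)
  *   1C LDTR selector
  *   1E DS, SS, CS and ES selectors
  *   26 DI, SI, BP, SP, BX, DX, CX and AX
  *   36 descriptor caches for ES, CS, SS and DS (6 bytes each)
  *   4E GDTR base+limit, 5A IDTR base+limit. */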
4039
4040 if (pVCpu->iem.s.uCpl != 0)
4041 {
4042 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4043 return iemRaiseGeneralProtectionFault0(pVCpu);
4044 }
4045
4046 uint8_t const *pbMem = NULL;
4047 uint16_t const *pa16Mem;
4048 uint8_t const *pa8Mem;
4049 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
4050 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
4051 if (rcStrict != VINF_SUCCESS)
4052 return rcStrict;
4053
4054 /* The MSW is at offset 0x06. */
4055 pa16Mem = (uint16_t const *)(pbMem + 0x06);
4056 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
4057 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
4058 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
4059 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
4060
4061 CPUMSetGuestCR0(pVCpu, uNewCr0);
4062 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
4063
4064 /* Inform PGM if mode changed. */
4065 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
4066 {
4067 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
4068 AssertRCReturn(rc, rc);
4069 /* ignore informational status codes */
4070 }
4071 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
4072 false /* fForce */);
4073
4074 /* TR selector is at offset 0x16. */
4075 pa16Mem = (uint16_t const *)(pbMem + 0x16);
4076 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0];
4077 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
4078 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4079
4080 /* Followed by FLAGS... */
4081 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
4082 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */
4083
4084 /* LDT is at offset 0x1C. */
4085 pa16Mem = (uint16_t const *)(pbMem + 0x1C);
4086 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0];
4087 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
4088 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4089
4090 /* Segment registers are at offset 0x1E. */
4091 pa16Mem = (uint16_t const *)(pbMem + 0x1E);
4092 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
4093 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
4094 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
4095 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
4096
4097 /* GPRs are at offset 0x26. */
4098 pa16Mem = (uint16_t const *)(pbMem + 0x26);
4099 pVCpu->cpum.GstCtx.di = pa16Mem[0];
4100 pVCpu->cpum.GstCtx.si = pa16Mem[1];
4101 pVCpu->cpum.GstCtx.bp = pa16Mem[2];
4102 pVCpu->cpum.GstCtx.sp = pa16Mem[3];
4103 pVCpu->cpum.GstCtx.bx = pa16Mem[4];
4104 pVCpu->cpum.GstCtx.dx = pa16Mem[5];
4105 pVCpu->cpum.GstCtx.cx = pa16Mem[6];
4106 pVCpu->cpum.GstCtx.ax = pa16Mem[7];
4107
4108 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
4109 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
4110 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
4111 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
4112 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
4113
4114 /* GDTR contents are at offset 0x4E, 6 bytes. */
4115 RTGCPHYS GCPtrBase;
4116 uint16_t cbLimit;
4117 pa8Mem = pbMem + 0x4E;
4118 /* NB: Fourth byte "should be zero"; we are ignoring it. */
4119 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4120 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4121 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4122
4123 /* IDTR contents are at offset 0x5A, 6 bytes. */
4124 pa8Mem = pbMem + 0x5A;
4125 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4126 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4127 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4128
4129 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
4130 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
4131 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
4132 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
4133 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
4134 Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
4135
4136 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
4137 if (rcStrict != VINF_SUCCESS)
4138 return rcStrict;
4139
4140 /* The CPL may change. It is taken from the "DPL fields of the SS and CS
4141 * descriptor caches" but there is no word as to what happens if those are
4142 * not identical (probably bad things).
4143 */
4144 pVCpu->iem.s.uCpl = pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl;
4145
4146 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
4147
4148 /* Flush the prefetch buffer. */
4149 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4150
4151/** @todo single stepping */
4152 return rcStrict;
4153}
4154
4155
4156/**
4157 * Implements SYSCALL (AMD and Intel64).
4158 */
4159IEM_CIMPL_DEF_0(iemCImpl_syscall)
4160{
4161 /** @todo hack, LOADALL should be decoded as such on a 286. */
4162 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
4163 return iemCImpl_loadall286(pVCpu, cbInstr);
4164
4165 /*
4166 * Check preconditions.
4167 *
4168 * Note that CPUs described in the documentation may load a few odd values
4169 * into CS and SS beyond those we allow here. This has yet to be checked on
4170 * real hardware.
4171 */
4172 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4173 {
4174 Log(("syscall: Not enabled in EFER -> #UD\n"));
4175 return iemRaiseUndefinedOpcode(pVCpu);
4176 }
4177 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4178 {
4179 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4180 return iemRaiseGeneralProtectionFault0(pVCpu);
4181 }
4182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4183 {
4184 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4185 return iemRaiseUndefinedOpcode(pVCpu);
4186 }
4187
4188 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4189
4190 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4191 /** @todo what about LDT selectors? Shouldn't matter, really. */
4192 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4193 uint16_t uNewSs = uNewCs + 8;
4194 if (uNewCs == 0 || uNewSs == 0)
4195 {
4196 /** @todo Neither Intel nor AMD document this check. */
4197 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4198 return iemRaiseGeneralProtectionFault0(pVCpu);
4199 }
4200
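 /* Summary of what follows: CS is taken from STAR[47:32] with SS = CS + 8.
  * In long mode the new RIP comes from LSTAR (64-bit callers) or CSTAR
  * (compatibility mode callers), RCX receives the return RIP, R11 the old
  * RFLAGS and RFLAGS is masked by SFMASK; in legacy mode EIP comes from
  * STAR[31:0] and only VM, IF and RF are cleared. */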
4201 /* Long mode and legacy mode differ. */
4202 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4203 {
4204 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx.msrCSTAR;
4205
4206 /* This test isn't in the docs, but I'm not trusting the guys writing
4207 the MSRs to have validated the values as canonical like they should. */
4208 if (!IEM_IS_CANONICAL(uNewRip))
4209 {
4210 /** @todo Intel claims this can't happen because IA32_LSTAR MSR can't be written with non-canonical address. */
4211 Log(("syscall: New RIP not canonical -> #UD\n"));
4212 return iemRaiseUndefinedOpcode(pVCpu);
4213 }
4214
4215 /*
4216 * Commit it.
4217 */
4218 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4219 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4220 pVCpu->cpum.GstCtx.rip = uNewRip;
4221
4222 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4223 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4224 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4225 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4226
4227 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4228 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4229 }
4230 else
4231 {
4232 /*
4233 * Commit it.
4234 */
4235 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4236 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4237 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4238 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4239
4240 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4241 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4242 }
4243 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4244 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4245 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4246 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4247 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4248
4249 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4250 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4251 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4252 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4253 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4254
4255 pVCpu->iem.s.uCpl = 0;
4256 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
4257
4258 /* Flush the prefetch buffer. */
4259 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4260
4261/** @todo single step */
4262 return VINF_SUCCESS;
4263}
4264
4265
4266/**
4267 * Implements SYSRET (AMD and Intel64).
4268 */
4269IEM_CIMPL_DEF_0(iemCImpl_sysret)
4270
4271{
4272 RT_NOREF_PV(cbInstr);
4273
4274 /*
4275 * Check preconditions.
4276 *
4277 * Note that CPUs described in the documentation may load a few odd values
4278 * into CS and SS beyond those we allow here. This has yet to be checked on
4279 * real hardware.
4280 */
4281 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4282 {
4283 Log(("sysret: Not enabled in EFER -> #UD\n"));
4284 return iemRaiseUndefinedOpcode(pVCpu);
4285 }
4286 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4287 {
4288 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4289 return iemRaiseUndefinedOpcode(pVCpu);
4290 }
4291 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4292 {
4293 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4294 return iemRaiseGeneralProtectionFault0(pVCpu);
4295 }
4296 if (pVCpu->iem.s.uCpl != 0)
4297 {
4298 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4299 return iemRaiseGeneralProtectionFault0(pVCpu);
4300 }
4301
4302 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4303
4304 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4305 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4306 uint16_t uNewSs = uNewCs + 8;
4307 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4308 uNewCs += 16;
4309 if (uNewCs == 0 || uNewSs == 0)
4310 {
4311 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4312 return iemRaiseGeneralProtectionFault0(pVCpu);
4313 }
4314
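 /* To recap the selector math above: the base selector comes from
  * STAR[63:48]; SS is always that value + 8, while CS gets a further + 16
  * for 64-bit operand size returns. The RPL of both is forced to 3 when
  * committed below. */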
4315 /*
4316 * Commit it.
4317 */
4318 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4319 {
4320 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4321 {
4322 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4323 /* Note! We disregard the Intel manual regarding the RCX canonical
4324 check; ask intel+xen why AMD doesn't do it. */
4325 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4326 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4327 | (3 << X86DESCATTR_DPL_SHIFT);
4328 }
4329 else
4330 {
4331 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4332 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4333 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4334 | (3 << X86DESCATTR_DPL_SHIFT);
4335 }
4336 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4337 * what it really ignores. RF and VM are hinted at being zero, by AMD.
4338 * Intel says: RFLAGS := (R11 & 3C7FD7H) | 2; */
4339 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4340 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4341 }
4342 else
4343 {
4344 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4345 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4346 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4347 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4348 | (3 << X86DESCATTR_DPL_SHIFT);
4349 }
4350 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4351 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4352 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4353 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4354 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4355
4356 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4357 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4358 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4359 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4360 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4361 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4362 * on sysret. */
4363
4364 pVCpu->iem.s.uCpl = 3;
4365 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
4366
4367 /* Flush the prefetch buffer. */
4368 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4369
4370/** @todo single step */
4371 return VINF_SUCCESS;
4372}
4373
4374
4375/**
4376 * Implements SYSENTER (Intel, 32-bit AMD).
4377 */
4378IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4379{
4380 RT_NOREF(cbInstr);
4381
4382 /*
4383 * Check preconditions.
4384 *
4385 * Note that CPUs described in the documentation may load a few odd values
4386 * into CS and SS beyond those we allow here. This has yet to be checked on
4387 * real hardware.
4388 */
4389 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4390 {
4391 Log(("sysenter: not supported -> #UD\n"));
4392 return iemRaiseUndefinedOpcode(pVCpu);
4393 }
4394 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4395 {
4396 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4397 return iemRaiseGeneralProtectionFault0(pVCpu);
4398 }
4399 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4400 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4401 {
4402 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4403 return iemRaiseUndefinedOpcode(pVCpu);
4404 }
4405 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4406 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4407 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4408 {
4409 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4410 return iemRaiseGeneralProtectionFault0(pVCpu);
4411 }
4412
4413 /* This test isn't in the docs, it's just a safeguard against missing
4414 canonical checks when writing the registers. */
4415 if (RT_LIKELY( !fIsLongMode
4416 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4417 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4418 { /* likely */ }
4419 else
4420 {
4421 Log(("sysenter: SYSENTER_EIP = %#RX64 and/or SYSENTER_ESP = %#RX64 not canonical -> #UD\n",
4422 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4423 return iemRaiseUndefinedOpcode(pVCpu);
4424 }
4425
4426/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4427
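 /* What gets loaded: CS from SYSENTER_CS with the RPL cleared, SS = CS + 8,
  * RIP and RSP from SYSENTER_EIP/SYSENTER_ESP (truncated to 32 bits outside
  * long mode). Both selectors become flat ring-0 segments and IF, VM and RF
  * are cleared. */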
4428 /*
4429 * Update registers and commit.
4430 */
4431 if (fIsLongMode)
4432 {
4433 Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4434 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4435 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4436 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4437 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4438 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4439 }
4440 else
4441 {
4442 Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
4443 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4444 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4445 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4446 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4447 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4448 }
4449 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4450 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4451 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4452 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4453 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4454
4455 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4456 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4457 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4458 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4459 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4460 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4461 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4462
4463 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4464 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4465 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4466
4467 pVCpu->iem.s.uCpl = 0;
4468
4469 /* Flush the prefetch buffer. */
4470 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4471
4472/** @todo single stepping */
4473 return VINF_SUCCESS;
4474}
4475
4476
4477/**
4478 * Implements SYSEXIT (Intel, 32-bit AMD).
4479 *
4480 * @param enmEffOpSize The effective operand size.
4481 */
4482IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4483{
4484 RT_NOREF(cbInstr);
4485
4486 /*
4487 * Check preconditions.
4488 *
4489 * Note that CPUs described in the documentation may load a few odd values
4490 * into CS and SS beyond those we allow here. This has yet to be checked on
4491 * real hardware.
4492 */
4493 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4494 {
4495 Log(("sysexit: not supported -> #UD\n"));
4496 return iemRaiseUndefinedOpcode(pVCpu);
4497 }
4498 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4499 {
4500 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4501 return iemRaiseGeneralProtectionFault0(pVCpu);
4502 }
4503 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4504 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4505 {
4506 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4507 return iemRaiseUndefinedOpcode(pVCpu);
4508 }
4509 if (pVCpu->iem.s.uCpl != 0)
4510 {
4511 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", pVCpu->iem.s.uCpl));
4512 return iemRaiseGeneralProtectionFault0(pVCpu);
4513 }
4514 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4515 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4516 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4517 {
4518 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4519 return iemRaiseGeneralProtectionFault0(pVCpu);
4520 }
4521
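 /* Target state: RIP is taken from RDX/EDX and RSP from RCX/ECX; CS becomes
  * SYSENTER_CS + 32 (64-bit) or + 16 (32-bit) with RPL 3, and SS is CS + 8.
  * Both are flat DPL 3 segments. */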
4522 /*
4523 * Update registers and commit.
4524 */
4525 if (enmEffOpSize == IEMMODE_64BIT)
4526 {
4527 Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4528 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rdx));
4529 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4530 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4531 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4532 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4533 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4534 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4535 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4536 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4537 }
4538 else
4539 {
4540 Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4541 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4542 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4543 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4544 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4545 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4546 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4547 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4548 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4549 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4550 }
4551 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4552 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4553 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4554
4555 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4556 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4557 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4558 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4559 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4560 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4561
4562 pVCpu->iem.s.uCpl = 3;
4563/** @todo single stepping */
4564
4565 /* Flush the prefetch buffer. */
4566 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4567
4568 return VINF_SUCCESS;
4569}
4570
4571
4572/**
4573 * Completes a MOV SReg,XXX or POP SReg instruction.
4574 *
4575 * When not modifying SS or when we're already in an interrupt shadow we
4576 * can update RIP and finish the instruction the normal way.
4577 *
4578 * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
4579 * both TF and DBx events. The TF will be ignored while the DBx ones will
4580 * be delayed until the next instruction boundary. For more details see
4581 * @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching Stacks}.
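 *
 * The classic guest pattern relying on this is a stack switch done as
 * 'mov ss, ax' immediately followed by 'mov sp, bp'; the shadow keeps
 * interrupts and debug traps from landing between the two instructions.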
4582 */
4583DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
4584{
4585 if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4586 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4587
4588 iemRegAddToRip(pVCpu, cbInstr);
4589 pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
4590 CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
4591
4592 return VINF_SUCCESS;
4593}
4594
4595
4596/**
4597 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4598 *
4599 * @param pVCpu The cross context virtual CPU structure of the calling
4600 * thread.
4601 * @param iSegReg The segment register number (valid).
4602 * @param uSel The new selector value.
4603 */
4604static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4605{
4606 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4607 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4608 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4609
4610 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4611
4612 /*
4613 * Real mode and V8086 mode are easy.
4614 */
4615 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4616 {
4617 *pSel = uSel;
4618 pHid->u64Base = (uint32_t)uSel << 4;
4619 pHid->ValidSel = uSel;
4620 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4621#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4622 /** @todo Does the CPU actually load limits and attributes in the
4623 * real/V8086 mode segment load case? It doesn't for CS in far
4624 * jumps... Affects unreal mode. */
4625 pHid->u32Limit = 0xffff;
4626 pHid->Attr.u = 0;
4627 pHid->Attr.n.u1Present = 1;
4628 pHid->Attr.n.u1DescType = 1;
4629 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4630 ? X86_SEL_TYPE_RW
4631 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4632#endif
4633 }
4634 /*
4635 * Protected mode.
4636 *
4637 * Check if it's a null segment selector value first, that's OK for DS, ES,
4638 * FS and GS. If not null, then we have to load and parse the descriptor.
4639 */
4640 else if (!(uSel & X86_SEL_MASK_OFF_RPL))
4641 {
4642 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4643 if (iSegReg == X86_SREG_SS)
4644 {
4645 /* In 64-bit kernel mode, the stack can be 0 because of the way
4646 interrupts are dispatched. AMD seems to have a slightly more
4647 relaxed relationship to SS.RPL than intel does. */
4648 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4649 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4650 || pVCpu->iem.s.uCpl > 2
4651 || ( uSel != pVCpu->iem.s.uCpl
4652 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4653 {
4654 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4655 return iemRaiseGeneralProtectionFault0(pVCpu);
4656 }
4657 }
4658
4659 *pSel = uSel; /* Not RPL, remember :-) */
4660 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
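 /* For a NULL SS the hidden DPL is still kept equal to the CPL below, since
    SS.DPL is generally treated as the current privilege level elsewhere. */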
4661 if (iSegReg == X86_SREG_SS)
4662 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4663 }
4664 else
4665 {
4666
4667 /* Fetch the descriptor. */
4668 IEMSELDESC Desc;
4669 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4670 if (rcStrict != VINF_SUCCESS)
4671 return rcStrict;
4672
4673 /* Check GPs first. */
4674 if (!Desc.Legacy.Gen.u1DescType)
4675 {
4676 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4677 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4678 }
4679 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4680 {
4681 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4682 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4683 {
4684 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4685 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4686 }
4687 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4688 {
4689 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4690 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4691 }
4692 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4693 {
4694 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4695 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4696 }
4697 }
4698 else
4699 {
4700 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4701 {
4702 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4703 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4704 }
4705 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4706 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4707 {
4708#if 0 /* this is what intel says. */
4709 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4710 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4711 {
4712 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4713 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4714 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4715 }
4716#else /* this is what makes more sense. */
4717 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4718 {
4719 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4720 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4721 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4722 }
4723 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4724 {
4725 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4726 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4727 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4728 }
4729#endif
4730 }
4731 }
4732
4733 /* Is it there? */
4734 if (!Desc.Legacy.Gen.u1Present)
4735 {
4736 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4737 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4738 }
4739
4740 /* The base and limit. */
4741 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4742 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4743
4744 /*
4745 * Ok, everything checked out fine. Now set the accessed bit before
4746 * committing the result into the registers.
4747 */
4748 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4749 {
4750 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4751 if (rcStrict != VINF_SUCCESS)
4752 return rcStrict;
4753 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4754 }
4755
4756 /* commit */
4757 *pSel = uSel;
4758 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4759 pHid->u32Limit = cbLimit;
4760 pHid->u64Base = u64Base;
4761 pHid->ValidSel = uSel;
4762 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4763
4764 /** @todo check if the hidden bits are loaded correctly for 64-bit
4765 * mode. */
4766 }
4767
4768 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4769 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4770 return VINF_SUCCESS;
4771}
4772
4773
4774/**
4775 * Implements 'mov SReg, r/m'.
4776 *
4777 * @param iSegReg The segment register number (valid).
4778 * @param uSel The new selector value.
4779 */
4780IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4781{
4782 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4783 if (rcStrict == VINF_SUCCESS)
4784 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4785 return rcStrict;
4786}
4787
4788
4789/**
4790 * Implements 'pop SReg'.
4791 *
4792 * @param iSegReg The segment register number (valid).
4793 * @param enmEffOpSize The efficient operand size (valid).
4794 */
4795IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4796{
4797 VBOXSTRICTRC rcStrict;
4798
4799 /*
4800 * Read the selector off the stack and join paths with mov ss, reg.
4801 */
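 /* Note: the selector is popped via a local RSP copy so that a faulting
    segment load leaves the guest stack pointer untouched; the new RSP is
    only committed further down once the load has succeeded. */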
4802 RTUINT64U TmpRsp;
4803 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4804 switch (enmEffOpSize)
4805 {
4806 case IEMMODE_16BIT:
4807 {
4808 uint16_t uSel;
4809 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4810 if (rcStrict == VINF_SUCCESS)
4811 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4812 break;
4813 }
4814
4815 case IEMMODE_32BIT:
4816 {
4817 uint32_t u32Value;
4818 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4819 if (rcStrict == VINF_SUCCESS)
4820 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
4821 break;
4822 }
4823
4824 case IEMMODE_64BIT:
4825 {
4826 uint64_t u64Value;
4827 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4828 if (rcStrict == VINF_SUCCESS)
4829 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
4830 break;
4831 }
4832 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4833 }
4834
4835 /*
4836 * If the load succeeded, commit the stack change and finish the instruction.
4837 */
4838 if (rcStrict == VINF_SUCCESS)
4839 {
4840 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4841 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4842 }
4843
4844 return rcStrict;
4845}
4846
4847
4848/**
4849 * Implements lgs, lfs, les, lds & lss.
4850 */
4851IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
4852{
4853 /*
4854 * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
4855 */
4856 /** @todo verify and test that mov, pop and lXs handle the segment
4857 * register loading in the exact same way. */
4858 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4859 if (rcStrict == VINF_SUCCESS)
4860 {
4861 switch (enmEffOpSize)
4862 {
4863 case IEMMODE_16BIT:
4864 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4865 break;
4866 case IEMMODE_32BIT:
4867 case IEMMODE_64BIT:
4868 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4869 break;
4870 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4871 }
4872 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4873 }
4874 return rcStrict;
4875}
4876
4877
4878/**
4879 * Helper for VERR, VERW, LAR and LSL that fetches the descriptor for a given selector.
4880 *
4881 * @retval VINF_SUCCESS on success.
4882 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4883 * @retval iemMemFetchSysU64 return value.
4884 *
4885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4886 * @param uSel The selector value.
4887 * @param fAllowSysDesc Whether system descriptors are OK or not.
4888 * @param pDesc Where to return the descriptor on success.
4889 */
4890static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4891{
4892 pDesc->Long.au64[0] = 0;
4893 pDesc->Long.au64[1] = 0;
4894
4895 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4896 return VINF_IEM_SELECTOR_NOT_OK;
4897
4898 /* Within the table limits? */
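 /* (OR-ing in the low three bits yields the offset of the descriptor's last
    byte, so the comparisons check that the entire 8-byte entry fits.) */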
4899 RTGCPTR GCPtrBase;
4900 if (uSel & X86_SEL_LDT)
4901 {
4902 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4903 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4904 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4905 return VINF_IEM_SELECTOR_NOT_OK;
4906 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4907 }
4908 else
4909 {
4910 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4911 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4912 return VINF_IEM_SELECTOR_NOT_OK;
4913 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
4914 }
4915
4916 /* Fetch the descriptor. */
4917 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4918 if (rcStrict != VINF_SUCCESS)
4919 return rcStrict;
4920 if (!pDesc->Legacy.Gen.u1DescType)
4921 {
4922 if (!fAllowSysDesc)
4923 return VINF_IEM_SELECTOR_NOT_OK;
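 /* System descriptors are 16 bytes wide in long mode, so fetch the upper
    half of the descriptor as well. */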
4924 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4925 {
4926 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4927 if (rcStrict != VINF_SUCCESS)
4928 return rcStrict;
4929 }
4930
4931 }
4932
4933 return VINF_SUCCESS;
4934}
4935
4936
4937/**
4938 * Implements verr (fWrite = false) and verw (fWrite = true).
4939 */
4940IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4941{
4942 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4943
4944 /** @todo figure whether the accessed bit is set or not. */
4945
4946 bool fAccessible = true;
4947 IEMSELDESC Desc;
4948 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4949 if (rcStrict == VINF_SUCCESS)
4950 {
4951 /* Check the descriptor, order doesn't matter much here. */
4952 if ( !Desc.Legacy.Gen.u1DescType
4953 || !Desc.Legacy.Gen.u1Present)
4954 fAccessible = false;
4955 else
4956 {
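 /* VERW requires a writable data segment; VERR requires a readable one
    (data segments are always readable, code segments only when the read
    bit is set). */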
4957 if ( fWrite
4958 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4959 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4960 fAccessible = false;
4961
4962 /** @todo testcase for the conforming behavior. */
4963 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4964 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4965 {
4966 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4967 fAccessible = false;
4968 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4969 fAccessible = false;
4970 }
4971 }
4972
4973 }
4974 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4975 fAccessible = false;
4976 else
4977 return rcStrict;
4978
4979 /* commit */
4980 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
4981
4982 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4983}
4984
4985
4986/**
4987 * Implements LAR and LSL with 64-bit operand size.
4988 *
4989 * @returns VINF_SUCCESS.
4990 * @param pu64Dst Pointer to the destination register.
4991 * @param uSel The selector to load details for.
4992 * @param fIsLar true = LAR, false = LSL.
4993 */
4994IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4995{
4996 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4997
4998 /** @todo figure whether the accessed bit is set or not. */
4999
5000 bool fDescOk = true;
5001 IEMSELDESC Desc;
5002 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
5003 if (rcStrict == VINF_SUCCESS)
5004 {
5005 /*
5006 * Check the descriptor type.
5007 */
5008 if (!Desc.Legacy.Gen.u1DescType)
5009 {
5010 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
5011 {
5012 if (Desc.Long.Gen.u5Zeros)
5013 fDescOk = false;
5014 else
5015 switch (Desc.Long.Gen.u4Type)
5016 {
5017 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
5018 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
5019 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
5020 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR; AMD and 32-bit mode say otherwise. */
5021 break;
5022 case AMD64_SEL_TYPE_SYS_CALL_GATE:
5023 fDescOk = fIsLar;
5024 break;
5025 default:
5026 fDescOk = false;
5027 break;
5028 }
5029 }
5030 else
5031 {
5032 switch (Desc.Long.Gen.u4Type)
5033 {
5034 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
5035 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
5036 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
5037 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
5038 case X86_SEL_TYPE_SYS_LDT:
5039 break;
5040 case X86_SEL_TYPE_SYS_286_CALL_GATE:
5041 case X86_SEL_TYPE_SYS_TASK_GATE:
5042 case X86_SEL_TYPE_SYS_386_CALL_GATE:
5043 fDescOk = fIsLar;
5044 break;
5045 default:
5046 fDescOk = false;
5047 break;
5048 }
5049 }
5050 }
5051 if (fDescOk)
5052 {
5053 /*
5054 * Check the RPL/DPL/CPL interaction..
5055 */
5056 /** @todo testcase for the conforming behavior. */
5057 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
5058 || !Desc.Legacy.Gen.u1DescType)
5059 {
5060 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
5061 fDescOk = false;
5062 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
5063 fDescOk = false;
5064 }
5065 }
5066
5067 if (fDescOk)
5068 {
5069 /*
5070 * All fine, start committing the result.
5071 */
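 /* LAR keeps bits 8 thru 23 of the second descriptor dword (access rights,
    limit 19:16 and the AVL/L/D/G flags) and masks off the base address
    bytes; LSL returns the granularity-expanded limit. */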
5072 if (fIsLar)
5073 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
5074 else
5075 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
5076 }
5077
5078 }
5079 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5080 fDescOk = false;
5081 else
5082 return rcStrict;
5083
5084 /* commit flags value and advance rip. */
5085 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5086 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5087}
5088
5089
5090/**
5091 * Implements LAR and LSL with 16-bit operand size.
5092 *
5093 * @returns VINF_SUCCESS.
5094 * @param pu16Dst Pointer to the destination register.
5095 * @param uSel The selector to load details for.
5096 * @param fIsLar true = LAR, false = LSL.
5097 */
5098IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5099{
5100 uint64_t u64TmpDst = *pu16Dst;
5101 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5102 *pu16Dst = u64TmpDst;
5103 return VINF_SUCCESS;
5104}
5105
5106
5107/**
5108 * Implements lgdt.
5109 *
5110 * @param iEffSeg The segment of the new gdtr contents
5111 * @param GCPtrEffSrc The address of the new gdtr contents.
5112 * @param enmEffOpSize The effective operand size.
5113 */
5114IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5115{
5116 if (pVCpu->iem.s.uCpl != 0)
5117 return iemRaiseGeneralProtectionFault0(pVCpu);
5118 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5119
5120 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5121 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5122 {
5123 Log(("lgdt: Guest intercept -> VM-exit\n"));
5124 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5125 }
5126
5127 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5128 {
5129 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5130 IEM_SVM_UPDATE_NRIP(pVCpu);
5131 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5132 }
5133
5134 /*
5135 * Fetch the limit and base address.
5136 */
5137 uint16_t cbLimit;
5138 RTGCPTR GCPtrBase;
5139 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5140 if (rcStrict == VINF_SUCCESS)
5141 {
5142 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
5143 || X86_IS_CANONICAL(GCPtrBase))
5144 {
5145 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5146 if (rcStrict == VINF_SUCCESS)
5147 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5148 }
5149 else
5150 {
5151 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5152 return iemRaiseGeneralProtectionFault0(pVCpu);
5153 }
5154 }
5155 return rcStrict;
5156}
5157
5158
5159/**
5160 * Implements sgdt.
5161 *
5162 * @param iEffSeg The segment where to store the gdtr content.
5163 * @param GCPtrEffDst The address where to store the gdtr content.
5164 */
5165IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5166{
5167 /*
5168 * Join paths with sidt.
5169 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5170 * you really must know.
5171 */
5172 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5173 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5174 {
5175 Log(("sgdt: Guest intercept -> VM-exit\n"));
5176 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5177 }
5178
5179 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5180 {
5181 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5182 IEM_SVM_UPDATE_NRIP(pVCpu);
5183 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5184 }
5185
5186 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5187 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5188 if (rcStrict == VINF_SUCCESS)
5189 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5190 return rcStrict;
5191}
5192
5193
5194/**
5195 * Implements lidt.
5196 *
5197 * @param iEffSeg The segment of the new idtr contents
5198 * @param GCPtrEffSrc The address of the new idtr contents.
5199 * @param enmEffOpSize The effective operand size.
5200 */
5201IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5202{
5203 if (pVCpu->iem.s.uCpl != 0)
5204 return iemRaiseGeneralProtectionFault0(pVCpu);
5205 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5206
5207 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5208 {
5209 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5210 IEM_SVM_UPDATE_NRIP(pVCpu);
5211 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5212 }
5213
5214 /*
5215 * Fetch the limit and base address.
5216 */
5217 uint16_t cbLimit;
5218 RTGCPTR GCPtrBase;
5219 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5220 if (rcStrict == VINF_SUCCESS)
5221 {
5222 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
5223 || X86_IS_CANONICAL(GCPtrBase))
5224 {
5225 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5226 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5227 }
5228 else
5229 {
5230 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5231 return iemRaiseGeneralProtectionFault0(pVCpu);
5232 }
5233 }
5234 return rcStrict;
5235}
5236
5237
5238/**
5239 * Implements sidt.
5240 *
5241 * @param iEffSeg The segment where to store the idtr content.
5242 * @param GCPtrEffDst The address where to store the idtr content.
5243 */
5244IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5245{
5246 /*
5247 * Join paths with sgdt.
5248 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5249 * you really must know.
5250 */
5251 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5252 {
5253 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5254 IEM_SVM_UPDATE_NRIP(pVCpu);
5255 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5256 }
5257
5258 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5259 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5260 if (rcStrict == VINF_SUCCESS)
5261 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5262 return rcStrict;
5263}
5264
5265
5266/**
5267 * Implements lldt.
5268 *
5269 * @param uNewLdt The new LDT selector value.
5270 */
5271IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5272{
5273 /*
5274 * Check preconditions.
5275 */
5276 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5277 {
5278 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5279 return iemRaiseUndefinedOpcode(pVCpu);
5280 }
5281 if (pVCpu->iem.s.uCpl != 0)
5282 {
5283 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
5284 return iemRaiseGeneralProtectionFault0(pVCpu);
5285 }
5286 /* Nested-guest VMX intercept. */
5287 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5288 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5289 {
5290 Log(("lldt: Guest intercept -> VM-exit\n"));
5291 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5292 }
5293 if (uNewLdt & X86_SEL_LDT)
5294 {
5295 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5296 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5297 }
5298
5299 /*
5300 * Now, loading a NULL selector is easy.
5301 */
5302 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5303 {
5304 /* Nested-guest SVM intercept. */
5305 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5306 {
5307 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5308 IEM_SVM_UPDATE_NRIP(pVCpu);
5309 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5310 }
5311
5312 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5313 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5314 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5315 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5316 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5317 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5318 {
5319 /* AMD-V seems to leave the base and limit alone. */
5320 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5321 }
5322 else
5323 {
5324 /* VT-x (Intel 3960x) seems to be doing the following. */
5325 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5326 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5327 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5328 }
5329
5330 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5331 }
5332
5333 /*
5334 * Read the descriptor.
5335 */
5336 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5337 IEMSELDESC Desc;
5338 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5339 if (rcStrict != VINF_SUCCESS)
5340 return rcStrict;
5341
5342 /* Check GPs first. */
5343 if (Desc.Legacy.Gen.u1DescType)
5344 {
5345 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5346 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5347 }
5348 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5349 {
5350 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5351 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5352 }
5353 uint64_t u64Base;
5354 if (!IEM_IS_LONG_MODE(pVCpu))
5355 u64Base = X86DESC_BASE(&Desc.Legacy);
5356 else
5357 {
5358 if (Desc.Long.Gen.u5Zeros)
5359 {
5360 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5361 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5362 }
5363
5364 u64Base = X86DESC64_BASE(&Desc.Long);
5365 if (!IEM_IS_CANONICAL(u64Base))
5366 {
5367 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5368 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5369 }
5370 }
5371
5372 /* NP */
5373 if (!Desc.Legacy.Gen.u1Present)
5374 {
5375 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5376 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5377 }
5378
5379 /* Nested-guest SVM intercept. */
5380 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5381 {
5382 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5383 IEM_SVM_UPDATE_NRIP(pVCpu);
5384 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5385 }
5386
5387 /*
5388 * It checks out alright, update the registers.
5389 */
5390/** @todo check if the actual value is loaded or if the RPL is dropped */
5391 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5392 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5393 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5394 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5395 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5396 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5397
5398 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5399}
5400
5401
5402/**
5403 * Implements sldt GReg
5404 *
5405 * @param iGReg The general register to store the LDTR selector in.
5406 * @param enmEffOpSize The operand size.
5407 */
5408IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5409{
5410 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5411 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5412 {
5413 Log(("sldt: Guest intercept -> VM-exit\n"));
5414 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5415 }
5416
5417 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5418
5419 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5420 switch (enmEffOpSize)
5421 {
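 /* Note: the 32-bit case stores a full 64 bits; a 32-bit destination
    zero-extends into the whole register, so the same store serves both
    widths. */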
5422 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5423 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5424 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5425 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5426 }
5427 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5428}
5429
5430
5431/**
5432 * Implements sldt mem.
5433 *
5434 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5435 * @param GCPtrEffDst Where to store the 16-bit LDTR selector.
5436 */
5437IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5438{
5439 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5440
5441 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5442 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5443 if (rcStrict == VINF_SUCCESS)
5444 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5445 return rcStrict;
5446}
5447
5448
5449/**
5450 * Implements ltr.
5451 *
5452 * @param uNewTr The new TSS selector value.
5453 */
5454IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5455{
5456 /*
5457 * Check preconditions.
5458 */
5459 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5460 {
5461 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5462 return iemRaiseUndefinedOpcode(pVCpu);
5463 }
5464 if (pVCpu->iem.s.uCpl != 0)
5465 {
5466 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
5467 return iemRaiseGeneralProtectionFault0(pVCpu);
5468 }
5469 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5470 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5471 {
5472 Log(("ltr: Guest intercept -> VM-exit\n"));
5473 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5474 }
5475 if (uNewTr & X86_SEL_LDT)
5476 {
5477 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5478 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5479 }
5480 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5481 {
5482 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5483 return iemRaiseGeneralProtectionFault0(pVCpu);
5484 }
5485 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5486 {
5487 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5488 IEM_SVM_UPDATE_NRIP(pVCpu);
5489 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5490 }
5491
5492 /*
5493 * Read the descriptor.
5494 */
5495 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5496 IEMSELDESC Desc;
5497 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5498 if (rcStrict != VINF_SUCCESS)
5499 return rcStrict;
5500
5501 /* Check GPs first. */
5502 if (Desc.Legacy.Gen.u1DescType)
5503 {
5504 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5505 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5506 }
5507 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5508 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5509 || IEM_IS_LONG_MODE(pVCpu)) )
5510 {
5511 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5512 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5513 }
5514 uint64_t u64Base;
5515 if (!IEM_IS_LONG_MODE(pVCpu))
5516 u64Base = X86DESC_BASE(&Desc.Legacy);
5517 else
5518 {
5519 if (Desc.Long.Gen.u5Zeros)
5520 {
5521 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5522 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5523 }
5524
5525 u64Base = X86DESC64_BASE(&Desc.Long);
5526 if (!IEM_IS_CANONICAL(u64Base))
5527 {
5528 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5529 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5530 }
5531 }
5532
5533 /* NP */
5534 if (!Desc.Legacy.Gen.u1Present)
5535 {
5536 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5537 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5538 }
5539
5540 /*
5541 * Set it busy.
5542 * Note! Intel says this should lock down the whole descriptor, but we'll
5543 * restrict ourselves to 32-bit for now due to lack of inline
5544 * assembly and such.
5545 */
5546 void *pvDesc;
5547 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
5548 IEM_ACCESS_DATA_RW, 0);
5549 if (rcStrict != VINF_SUCCESS)
5550 return rcStrict;
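 /* Bit 41 of the descriptor (bit 1 of the type field in byte 5) is the TSS
    busy bit. The mapping above need not be dword aligned, so re-base the
    pointer and adjust the bit index before the atomic bit set. */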
5551 switch ((uintptr_t)pvDesc & 3)
5552 {
5553 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5554 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5555 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5556 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5557 }
5558 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5559 if (rcStrict != VINF_SUCCESS)
5560 return rcStrict;
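 /* Mirror the busy bit in the local copy so the hidden TR attributes
    committed below match what was just written to the descriptor table. */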
5561 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5562
5563 /*
5564 * It checks out alright, update the registers.
5565 */
5566/** @todo check if the actual value is loaded or if the RPL is dropped */
5567 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5568 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5569 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5570 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5571 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5572 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5573
5574 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5575}
5576
5577
5578/**
5579 * Implements str GReg
5580 *
5581 * @param iGReg The general register to store the TR selector in.
5582 * @param enmEffOpSize The operand size.
5583 */
5584IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5585{
5586 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5587 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5588 {
5589 Log(("str_reg: Guest intercept -> VM-exit\n"));
5590 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5591 }
5592
5593 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5594
5595 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5596 switch (enmEffOpSize)
5597 {
5598 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5599 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5600 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5602 }
5603 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5604}
5605
5606
5607/**
5608 * Implements str mem.
5609 *
5610 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5611 * @param GCPtrEffDst Where to store the 16-bit TR selector.
5612 */
5613IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5614{
5615 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5616 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5617 {
5618 Log(("str_mem: Guest intercept -> VM-exit\n"));
5619 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5620 }
5621
5622 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5623
5624 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5625 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5626 if (rcStrict == VINF_SUCCESS)
5627 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5628 return rcStrict;
5629}
5630
5631
5632/**
5633 * Implements mov GReg,CRx.
5634 *
5635 * @param iGReg The general register to store the CRx value in.
5636 * @param iCrReg The CRx register to read (valid).
5637 */
5638IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5639{
5640 if (pVCpu->iem.s.uCpl != 0)
5641 return iemRaiseGeneralProtectionFault0(pVCpu);
5642 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5643
5644 if (IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5645 {
5646 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5647 IEM_SVM_UPDATE_NRIP(pVCpu);
5648 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5649 }
5650
5651 /* Read it. */
5652 uint64_t crX;
5653 switch (iCrReg)
5654 {
5655 case 0:
5656 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5657 crX = pVCpu->cpum.GstCtx.cr0;
5658 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5659 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5660 break;
5661 case 2:
5662 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5663 crX = pVCpu->cpum.GstCtx.cr2;
5664 break;
5665 case 3:
5666 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5667 crX = pVCpu->cpum.GstCtx.cr3;
5668 break;
5669 case 4:
5670 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5671 crX = pVCpu->cpum.GstCtx.cr4;
5672 break;
5673 case 8:
5674 {
5675 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5676#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5677 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5678 {
5679 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5680 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5681 return rcStrict;
5682
5683 /*
5684 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR are copied
5685 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5686 * are cleared.
5687 *
5688 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5689 */
5690 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5691 {
5692 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5693 crX = (uTpr >> 4) & 0xf;
5694 break;
5695 }
5696 }
5697#endif
5698#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5699 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5700 {
5701 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5702 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5703 {
5704 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5705 break;
5706 }
5707 }
5708#endif
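 /* CR8 architecturally holds the task-priority class, i.e. bits 7:4 of the
    APIC TPR, hence the shift when converting the TPR to a CR8 value. */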
5709 uint8_t uTpr;
5710 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5711 if (RT_SUCCESS(rc))
5712 crX = uTpr >> 4;
5713 else
5714 crX = 0;
5715 break;
5716 }
5717 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5718 }
5719
5720#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5721 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5722 {
5723 switch (iCrReg)
5724 {
5725 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5726 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5727 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5728
5729 case 3:
5730 {
5731 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5732 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5733 return rcStrict;
5734 break;
5735 }
5736 }
5737 }
5738#endif
5739
5740 /* Store it. */
5741 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5742 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5743 else
5744 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5745
5746 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5747}
5748
5749
5750/**
5751 * Implements smsw GReg.
5752 *
5753 * @param iGReg The general register to store the MSW (low CR0) value in.
5754 * @param enmEffOpSize The operand size.
5755 */
5756IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5757{
5758 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5759
5760#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5761 uint64_t u64MaskedCr0;
5762 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5763 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5764 else
5765 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5766 uint64_t const u64GuestCr0 = u64MaskedCr0;
5767#else
5768 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5769#endif
5770
5771 switch (enmEffOpSize)
5772 {
5773 case IEMMODE_16BIT:
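 /* Pre-486 targets return the undefined upper MSW bits as ones: a 286
    only takes PE/MP/EM/TS (bits 0-3) from CR0, a 386 additionally ET
    (bit 4). */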
5774 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5775 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0;
5776 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5777 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xffe0;
5778 else
5779 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xfff0;
5780 break;
5781
5782 case IEMMODE_32BIT:
5783 *(uint32_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)u64GuestCr0;
5784 break;
5785
5786 case IEMMODE_64BIT:
5787 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = u64GuestCr0;
5788 break;
5789
5790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5791 }
5792
5793 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5794}
5795
5796
5797/**
5798 * Implements smsw mem.
5799 *
5800 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5801 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5802 */
5803IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5804{
5805 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5806
5807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5808 uint64_t u64MaskedCr0;
5809 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5810 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5811 else
5812 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5813 uint64_t const u64GuestCr0 = u64MaskedCr0;
5814#else
5815 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5816#endif
5817
5818 uint16_t u16Value;
5819 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5820 u16Value = (uint16_t)u64GuestCr0;
5821 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5822 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5823 else
5824 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5825
5826 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5827 if (rcStrict == VINF_SUCCESS)
5828 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5829 return rcStrict;
5830}
5831
5832
5833/**
5834 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
5835 */
5836#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
5837 do \
5838 { \
5839 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
5840 if (RT_SUCCESS(rcX)) \
5841 { /* likely */ } \
5842 else \
5843 { \
5844 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
5845 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
5846 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
5847 } \
5848 } while (0)
5849
5850
5851/**
5852 * Used to implement 'mov CRx,GReg', 'lmsw r/m16' and 'clts'.
5853 *
5854 * @param iCrReg The CRx register to write (valid).
5855 * @param uNewCrX The new value.
5856 * @param enmAccessCrX The instruction that caused the CrX load.
5857 * @param iGReg The general register in case of a 'mov CRx,GReg'
5858 * instruction.
5859 */
5860IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5861{
5862 VBOXSTRICTRC rcStrict;
5863 int rc;
5864#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5865 RT_NOREF2(iGReg, enmAccessCrX);
5866#endif
5867
5868 /*
5869 * Try to store it.
5870 * Unfortunately, CPUM only does a tiny bit of the work.
5871 */
5872 switch (iCrReg)
5873 {
5874 case 0:
5875 {
5876 /*
5877 * Perform checks.
5878 */
5879 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5880
5881 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5882 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5883
5884 /* ET is hardcoded on 486 and later. */
5885 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5886 uNewCrX |= X86_CR0_ET;
5887 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5888 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5889 {
5890 uNewCrX &= fValid;
5891 uNewCrX |= X86_CR0_ET;
5892 }
5893 else
5894 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5895
5896 /* Check for reserved bits. */
5897 if (uNewCrX & ~(uint64_t)fValid)
5898 {
5899 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5900 return iemRaiseGeneralProtectionFault0(pVCpu);
5901 }
5902
5903 /* Check for invalid combinations. */
5904 if ( (uNewCrX & X86_CR0_PG)
5905 && !(uNewCrX & X86_CR0_PE) )
5906 {
5907 Log(("Trying to set CR0.PG without CR0.PE\n"));
5908 return iemRaiseGeneralProtectionFault0(pVCpu);
5909 }
5910
5911 if ( !(uNewCrX & X86_CR0_CD)
5912 && (uNewCrX & X86_CR0_NW) )
5913 {
5914 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5915 return iemRaiseGeneralProtectionFault0(pVCpu);
5916 }
5917
5918 if ( !(uNewCrX & X86_CR0_PG)
5919 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
5920 {
5921 Log(("Trying to clear CR0.PG while leaving CR4.PCIDE set\n"));
5922 return iemRaiseGeneralProtectionFault0(pVCpu);
5923 }
5924
5925 /* Long mode consistency checks. */
5926 if ( (uNewCrX & X86_CR0_PG)
5927 && !(uOldCrX & X86_CR0_PG)
5928 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5929 {
5930 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
5931 {
5932 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5933 return iemRaiseGeneralProtectionFault0(pVCpu);
5934 }
5935 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
5936 {
5937 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5938 return iemRaiseGeneralProtectionFault0(pVCpu);
5939 }
5940 }
5941
5942#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5943 /* Check for bits that must remain set or cleared in VMX operation,
5944 see Intel spec. 23.8 "Restrictions on VMX operation". */
5945 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5946 {
5947 uint64_t const uCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
5948 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
5949 {
5950 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
5951 return iemRaiseGeneralProtectionFault0(pVCpu);
5952 }
5953
5954 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5955 if (uNewCrX & ~uCr0Fixed1)
5956 {
5957 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
5958 return iemRaiseGeneralProtectionFault0(pVCpu);
5959 }
5960 }
5961#endif
5962
5963 /*
5964 * SVM nested-guest CR0 write intercepts.
5965 */
5966 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5967 {
5968 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5969 IEM_SVM_UPDATE_NRIP(pVCpu);
5970 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5971 }
5972 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5973 {
5974 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5975 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5976 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5977 {
5978 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5979 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5980 IEM_SVM_UPDATE_NRIP(pVCpu);
5981 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5982 }
5983 }
5984
5985 /*
5986 * Change EFER.LMA if entering or leaving long mode.
5987 */
5988 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
5989 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5990 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5991 {
5992 if (uNewCrX & X86_CR0_PG)
5993 NewEFER |= MSR_K6_EFER_LMA;
5994 else
5995 NewEFER &= ~MSR_K6_EFER_LMA;
5996
5997 CPUMSetGuestEFER(pVCpu, NewEFER);
5998 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
5999 }
6000
6001 /*
6002 * Inform PGM.
6003 */
6004 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
6005 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
6006 {
6007 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
6008 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
6009 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6010 { /* likely */ }
6011 else
6012 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6013 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6014 AssertRCReturn(rc, rc);
6015 /* ignore informational status codes */
6016 }
6017
6018 /*
6019 * Change CR0.
6020 */
6021 CPUMSetGuestCR0(pVCpu, uNewCrX);
6022 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
6023
6024 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6025 false /* fForce */);
6026 break;
6027 }
6028
6029 /*
6030 * CR2 can be changed without any restrictions.
6031 */
6032 case 2:
6033 {
6034 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
6035 {
6036 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6037 IEM_SVM_UPDATE_NRIP(pVCpu);
6038 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
6039 }
6040 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
6041 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
6042 rcStrict = VINF_SUCCESS;
6043 break;
6044 }
6045
6046 /*
6047 * CR3 is relatively simple, although AMD and Intel have different
6048 * accounts of how setting reserved bits is handled. We take Intel's
6049 * word for the lower bits and AMD's for the high bits (63:52). The
6050 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
6051 * on this.
6052 */
6053 /** @todo Testcase: Setting reserved bits in CR3, especially before
6054 * enabling paging. */
6055 case 3:
6056 {
6057 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
6058
6059 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
6060 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
6061 && (uNewCrX & RT_BIT_64(63)))
6062 {
6063 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
6064 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
6065 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
6066 * Paging-Structure Caches". */
6067 uNewCrX &= ~RT_BIT_64(63);
6068 }
6069
6070 /* Check / mask the value. */
6071#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6072 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
6073 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
6074 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
6075 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
6076#else
6077 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
6078#endif
6079 if (uNewCrX & fInvPhysMask)
6080 {
6081 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6082 * very vague in this area. As mentioned above, need testcase on real
6083 * hardware... Sigh. */
6084 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6085 return iemRaiseGeneralProtectionFault0(pVCpu);
6086 }
6087
6088 uint64_t fValid;
6089 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6090 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6091 {
6092 /** @todo Redundant? This value has already been validated above. */
6093 fValid = UINT64_C(0x000fffffffffffff);
6094 }
6095 else
6096 fValid = UINT64_C(0xffffffff);
6097 if (uNewCrX & ~fValid)
6098 {
6099 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6100 uNewCrX, uNewCrX & ~fValid));
6101 uNewCrX &= fValid;
6102 }
6103
6104 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6105 {
6106 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6107 IEM_SVM_UPDATE_NRIP(pVCpu);
6108 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6109 }
6110
6111 /* Inform PGM. */
6112 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6113 {
6114 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6115 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6116 { /* likely */ }
6117 else
6118 {
6119 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6120 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6121 }
6122 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6123 AssertRCReturn(rc, rc);
6124 /* ignore informational status codes */
6125 }
6126
6127 /* Make the change. */
6128 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6129 AssertRCSuccessReturn(rc, rc);
6130
6131 rcStrict = VINF_SUCCESS;
6132 break;
6133 }
6134
6135 /*
6136 * CR4 is a bit more tedious as there are bits which cannot be cleared
6137 * under some circumstances and such.
6138 */
6139 case 4:
6140 {
6141 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6142 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6143
6144 /* Reserved bits. */
6145 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6146 if (uNewCrX & ~(uint64_t)fValid)
6147 {
6148 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6149 return iemRaiseGeneralProtectionFault0(pVCpu);
6150 }
6151
6152 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6153 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6154
6155 /* PCIDE check. */
6156 if ( fPcide
6157 && ( !fLongMode
6158 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6159 {
6160 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6161 return iemRaiseGeneralProtectionFault0(pVCpu);
6162 }
6163
6164 /* PAE check. */
6165 if ( fLongMode
6166 && (uOldCrX & X86_CR4_PAE)
6167 && !(uNewCrX & X86_CR4_PAE))
6168 {
6169 Log(("Trying to clear CR4.PAE while long mode is active\n"));
6170 return iemRaiseGeneralProtectionFault0(pVCpu);
6171 }
6172
6173 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6174 {
6175 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6176 IEM_SVM_UPDATE_NRIP(pVCpu);
6177 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6178 }
6179
6180 /* Check for bits that must remain set or cleared in VMX operation,
6181 see Intel spec. 23.8 "Restrictions on VMX operation". */
6182 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
6183 {
6184 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6185 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6186 {
6187 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6188 return iemRaiseGeneralProtectionFault0(pVCpu);
6189 }
6190
6191 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6192 if (uNewCrX & ~uCr4Fixed1)
6193 {
6194 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6195 return iemRaiseGeneralProtectionFault0(pVCpu);
6196 }
6197 }
6198
6199 /*
6200 * Notify PGM.
6201 */
6202 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6203 {
6204 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6205 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6206 { /* likely */ }
6207 else
6208 {
6209 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6210 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6211 }
6212 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6213 AssertRCReturn(rc, rc);
6214 /* ignore informational status codes */
6215 }
6216
6217 /*
6218 * Change it.
6219 */
6220 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6221 AssertRCSuccessReturn(rc, rc);
6222 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6223
6224 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6225 false /* fForce */);
6226 break;
6227 }
6228
6229 /*
6230 * CR8 maps to the APIC TPR.
6231 */
6232 case 8:
6233 {
6234 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6235 if (uNewCrX & ~(uint64_t)0xf)
6236 {
6237 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6238 return iemRaiseGeneralProtectionFault0(pVCpu);
6239 }
6240
6241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6242 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6243 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6244 {
6245 /*
6246 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6247 * is copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6248 * cleared. Following this the processor performs TPR virtualization.
6249 *
6250 * However, we should not perform TPR virtualization immediately here but
6251 * after this instruction has completed.
6252 *
6253 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6254 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6255 */
6256 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6257 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6258 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6259 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6260 rcStrict = VINF_SUCCESS;
6261 break;
6262 }
6263#endif
6264
6265#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6266 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6267 {
6268 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6269 {
6270 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6271 IEM_SVM_UPDATE_NRIP(pVCpu);
6272 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6273 }
6274
6275 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6276 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6277 {
6278 rcStrict = VINF_SUCCESS;
6279 break;
6280 }
6281 }
6282#endif
6283 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6284 APICSetTpr(pVCpu, u8Tpr);
6285 rcStrict = VINF_SUCCESS;
6286 break;
6287 }
6288
6289 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6290 }
6291
6292 /*
6293 * Advance the RIP on success.
6294 */
6295 if (RT_SUCCESS(rcStrict))
6296 {
6297 if (rcStrict != VINF_SUCCESS)
6298 iemSetPassUpStatus(pVCpu, rcStrict);
6299 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6300 }
6301
6302 return rcStrict;
6303}
6304
6305
6306/**
6307 * Implements mov CRx,GReg.
6308 *
6309 * @param iCrReg The CRx register to write (valid).
6310 * @param iGReg The general register to load the CRx value from.
6311 */
6312IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6313{
6314 if (pVCpu->iem.s.uCpl != 0)
6315 return iemRaiseGeneralProtectionFault0(pVCpu);
6316 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6317
6318 /*
6319 * Read the new value from the source register and call common worker.
6320 */
6321 uint64_t uNewCrX;
6322 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6323 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6324 else
6325 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6326
6327#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6328 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6329 {
6330 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6331 switch (iCrReg)
6332 {
6333 case 0:
6334 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6335 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6336 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6337 }
6338 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6339 return rcStrict;
6340 }
6341#endif
6342
6343 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6344}
6345
6346
6347/**
6348 * Implements 'LMSW r/m16'
6349 *
6350 * @param u16NewMsw The new value.
6351 * @param GCPtrEffDst The guest-linear address of the source operand in case
6352 * of a memory operand. For a register operand, pass
6353 * NIL_RTGCPTR.
6354 */
6355IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6356{
6357 if (pVCpu->iem.s.uCpl != 0)
6358 return iemRaiseGeneralProtectionFault0(pVCpu);
6359 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6360 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6361
6362#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6363 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6364 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6365 {
6366 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6367 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6368 return rcStrict;
6369 }
6370#else
6371 RT_NOREF_PV(GCPtrEffDst);
6372#endif
6373
6374 /*
6375 * Compose the new CR0 value and call common worker.
6376 */
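    /* Note: CR0.PE is deliberately not masked out of the old value below, so LMSW can set PE
       but never clear it. */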
6377 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6378 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6379 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6380}
6381
6382
6383/**
6384 * Implements 'CLTS'.
6385 */
6386IEM_CIMPL_DEF_0(iemCImpl_clts)
6387{
6388 if (pVCpu->iem.s.uCpl != 0)
6389 return iemRaiseGeneralProtectionFault0(pVCpu);
6390
6391 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6392 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6393 uNewCr0 &= ~X86_CR0_TS;
6394
6395#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6396 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6397 {
6398 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6399 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6400 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6401 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6402 return rcStrict;
6403 }
6404#endif
6405
6406 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6407}
6408
6409
6410/**
6411 * Implements mov GReg,DRx.
6412 *
6413 * @param iGReg The general register to store the DRx value in.
6414 * @param iDrReg The DRx register to read (0-7).
6415 */
6416IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6417{
6418#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6419 /*
6420 * Check nested-guest VMX intercept.
6421 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6422 * over CPL and CR4.DE and even DR4/DR5 checks.
6423 *
6424 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6425 */
6426 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6427 {
6428 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6429 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6430 return rcStrict;
6431 }
6432#endif
6433
6434 /*
6435 * Check preconditions.
6436 */
6437 /* Raise GPs. */
6438 if (pVCpu->iem.s.uCpl != 0)
6439 return iemRaiseGeneralProtectionFault0(pVCpu);
6440 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6441 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6442
6443 /** @todo \#UD outside ring-0 too? */
6444 if (iDrReg == 4 || iDrReg == 5)
6445 {
6446 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6447 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6448 {
6449 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6450 return iemRaiseGeneralProtectionFault0(pVCpu);
6451 }
6452 iDrReg += 2;
6453 }
6454
6455 /* Raise #DB if general access detect is enabled. */
6456 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6457 {
6458 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6459 return iemRaiseDebugException(pVCpu);
6460 }
6461
6462 /*
6463 * Read the debug register and store it in the specified general register.
6464 */
6465 uint64_t drX;
6466 switch (iDrReg)
6467 {
6468 case 0:
6469 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6470 drX = pVCpu->cpum.GstCtx.dr[0];
6471 break;
6472 case 1:
6473 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6474 drX = pVCpu->cpum.GstCtx.dr[1];
6475 break;
6476 case 2:
6477 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6478 drX = pVCpu->cpum.GstCtx.dr[2];
6479 break;
6480 case 3:
6481 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6482 drX = pVCpu->cpum.GstCtx.dr[3];
6483 break;
6484 case 6:
6485 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6486 drX = pVCpu->cpum.GstCtx.dr[6];
6487 drX |= X86_DR6_RA1_MASK;
6488 drX &= ~X86_DR6_RAZ_MASK;
6489 break;
6490 case 7:
6491 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6492 drX = pVCpu->cpum.GstCtx.dr[7];
6493 drX |= X86_DR7_RA1_MASK;
6494 drX &= ~X86_DR7_RAZ_MASK;
6495 break;
6496 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6497 }
6498
6499 /** @todo SVM nested-guest intercept for DR8-DR15? */
6500 /*
6501 * Check for any SVM nested-guest intercepts for the DRx read.
6502 */
6503 if (IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6504 {
6505 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6506 IEM_SVM_UPDATE_NRIP(pVCpu);
6507 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6508 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6509 }
6510
6511 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6512 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
6513 else
6514 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
6515
6516 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6517}
6518
6519
6520/**
6521 * Implements mov DRx,GReg.
6522 *
6523 * @param iDrReg The DRx register to write (valid).
6524 * @param iGReg The general register to load the DRx value from.
6525 */
6526IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6527{
6528#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6529 /*
6530 * Check nested-guest VMX intercept.
6531 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6532 * over CPL and CR4.DE and even DR4/DR5 checks.
6533 *
6534 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6535 */
6536 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6537 {
6538 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6539 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6540 return rcStrict;
6541 }
6542#endif
6543
6544 /*
6545 * Check preconditions.
6546 */
6547 if (pVCpu->iem.s.uCpl != 0)
6548 return iemRaiseGeneralProtectionFault0(pVCpu);
6549 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6550 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6551
6552 if (iDrReg == 4 || iDrReg == 5)
6553 {
6554 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6555 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6556 {
6557 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6558 return iemRaiseGeneralProtectionFault0(pVCpu);
6559 }
6560 iDrReg += 2;
6561 }
6562
6563 /* Raise #DB if general access detect is enabled. */
6564 /** @todo is the \#DB for DR7.GD raised before the \#GP for reserved high
6565 * bits in DR7/DR6? */
6566 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6567 {
6568 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6569 return iemRaiseDebugException(pVCpu);
6570 }
6571
6572 /*
6573 * Read the new value from the source register.
6574 */
6575 uint64_t uNewDrX;
6576 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6577 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6578 else
6579 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6580
6581 /*
6582 * Adjust it.
6583 */
6584 switch (iDrReg)
6585 {
6586 case 0:
6587 case 1:
6588 case 2:
6589 case 3:
6590 /* nothing to adjust */
6591 break;
6592
6593 case 6:
6594 if (uNewDrX & X86_DR6_MBZ_MASK)
6595 {
6596 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6597 return iemRaiseGeneralProtectionFault0(pVCpu);
6598 }
6599 uNewDrX |= X86_DR6_RA1_MASK;
6600 uNewDrX &= ~X86_DR6_RAZ_MASK;
6601 break;
6602
6603 case 7:
6604 if (uNewDrX & X86_DR7_MBZ_MASK)
6605 {
6606 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6607 return iemRaiseGeneralProtectionFault0(pVCpu);
6608 }
6609 uNewDrX |= X86_DR7_RA1_MASK;
6610 uNewDrX &= ~X86_DR7_RAZ_MASK;
6611 break;
6612
6613 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6614 }
6615
6616 /** @todo SVM nested-guest intercept for DR8-DR15? */
6617 /*
6618 * Check for any SVM nested-guest intercepts for the DRx write.
6619 */
6620 if (IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6621 {
6622 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6623 IEM_SVM_UPDATE_NRIP(pVCpu);
6624 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6625 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6626 }
6627
6628 /*
6629 * Do the actual setting.
6630 */
6631 if (iDrReg < 4)
6632 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6633 else if (iDrReg == 6)
6634 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6635
6636 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6637 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6638
6639 /*
6640 * Re-init hardware breakpoint summary if it was DR7 that got changed.
6641 */
6642 if (iDrReg == 7)
6643 {
6644 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
6645 pVCpu->iem.s.fPendingDataBreakpoints = false;
6646 pVCpu->iem.s.fPendingIoBreakpoints = false;
6647 iemInitPendingBreakpointsSlow(pVCpu);
6648 }
6649
6650 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6651}
6652
6653
6654/**
6655 * Implements mov GReg,TRx.
6656 *
6657 * @param iGReg The general register to store the
6658 * TRx value in.
6659 * @param iTrReg The TRx register to read (6/7).
6660 */
6661IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6662{
6663 /*
6664 * Check preconditions. NB: This instruction is 386/486 only.
6665 */
6666
6667 /* Raise GPs. */
6668 if (pVCpu->iem.s.uCpl != 0)
6669 return iemRaiseGeneralProtectionFault0(pVCpu);
6670 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6671
6672 if (iTrReg < 6 || iTrReg > 7)
6673 {
6674 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6675 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6676 return iemRaiseGeneralProtectionFault0(pVCpu);
6677 }
6678
6679 /*
6680 * Read the test register and store it in the specified general register.
6681 * This is currently a dummy implementation that only exists to satisfy
6682 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6683 * TR6/TR7 registers. Software which actually depends on the TR values
6684 * (different on 386/486) is exceedingly rare.
6685 */
6686 uint64_t trX;
6687 switch (iTrReg)
6688 {
6689 case 6:
6690 trX = 0; /* Currently a dummy. */
6691 break;
6692 case 7:
6693 trX = 0; /* Currently a dummy. */
6694 break;
6695 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6696 }
6697
6698 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)trX;
6699
6700 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6701}
6702
6703
6704/**
6705 * Implements mov TRx,GReg.
6706 *
6707 * @param iTrReg The TRx register to write (valid).
6708 * @param iGReg The general register to load the TRx
6709 * value from.
6710 */
6711IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6712{
6713 /*
6714 * Check preconditions. NB: This instruction is 386/486 only.
6715 */
6716
6717 /* Raise GPs. */
6718 if (pVCpu->iem.s.uCpl != 0)
6719 return iemRaiseGeneralProtectionFault0(pVCpu);
6720 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6721
6722 if (iTrReg < 6 || iTrReg > 7)
6723 {
6724 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6725 Log(("mov tr%u,r%u: invalid register -> #GP(0)\n", iTrReg, iGReg));
6726 return iemRaiseGeneralProtectionFault0(pVCpu);
6727 }
6728
6729 /*
6730 * Read the new value from the source register.
6731 */
6732 uint64_t uNewTrX;
6733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6734 uNewTrX = iemGRegFetchU64(pVCpu, iGReg);
6735 else
6736 uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6737
6738 /*
6739 * Here we would do the actual setting if this weren't a dummy implementation
6740 * that only exists to prevent old debuggers like WDEB386 or OS/2 KDB
6741 * from crashing.
6742 */
6743 RT_NOREF(uNewTrX);
6744
6745 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6746}
6747
6748
6749/**
6750 * Implements 'INVLPG m'.
6751 *
6752 * @param GCPtrPage The effective address of the page to invalidate.
6753 * @remarks Updates the RIP.
6754 */
6755IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6756{
6757 /* ring-0 only. */
6758 if (pVCpu->iem.s.uCpl != 0)
6759 return iemRaiseGeneralProtectionFault0(pVCpu);
6760 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6761 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6762
6763#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6764 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6765 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6766 {
6767 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6768 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6769 }
6770#endif
6771
6772 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6773 {
6774 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6775 IEM_SVM_UPDATE_NRIP(pVCpu);
6776 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6777 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6778 }
6779
6780 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6781 if (rc == VINF_SUCCESS)
6782 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6783 if (rc == VINF_PGM_SYNC_CR3)
6784 {
6785 iemSetPassUpStatus(pVCpu, rc);
6786 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6787 }
6788
6789 AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6790 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6791 return rc;
6792}
6793
6794
6795/**
6796 * Implements INVPCID.
6797 *
6798 * @param iEffSeg The segment of the invpcid descriptor.
6799 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
6800 * @param uInvpcidType The invalidation type.
6801 * @remarks Updates the RIP.
6802 */
6803IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
6804{
6805 /*
6806 * Check preconditions.
6807 */
6808 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6809 return iemRaiseUndefinedOpcode(pVCpu);
6810
6811 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6812 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6813 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID))
6814 {
6815 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6816 return iemRaiseUndefinedOpcode(pVCpu);
6817 }
6818
6819 if (pVCpu->iem.s.uCpl != 0)
6820 {
6821 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6822 return iemRaiseGeneralProtectionFault0(pVCpu);
6823 }
6824
6825 if (IEM_IS_V86_MODE(pVCpu))
6826 {
6827 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6828 return iemRaiseGeneralProtectionFault0(pVCpu);
6829 }
6830
6831 /*
6832 * Check nested-guest intercept.
6833 *
6834 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6835 * both set. We have already checked the former earlier in this function.
6836 *
6837 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6838 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6839 */
6840 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6841 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6842 {
6843 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6844 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6845 }
6846
6847 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6848 {
6849 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
6850 return iemRaiseGeneralProtectionFault0(pVCpu);
6851 }
6852 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6853
6854 /*
6855 * Fetch the invpcid descriptor from guest memory.
6856 */
6857 RTUINT128U uDesc;
6858 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6859 if (rcStrict == VINF_SUCCESS)
6860 {
6861 /*
6862 * Validate the descriptor.
6863 */
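        /* Descriptor layout: bits 11:0 of the low quadword hold the PCID (bits 63:12 are
           reserved and must be zero); the high quadword holds the linear address used by the
           individual-address invalidation type. */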
6864 if (uDesc.s.Lo > 0xfff)
6865 {
6866 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6867 return iemRaiseGeneralProtectionFault0(pVCpu);
6868 }
6869
6870 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6871 uint16_t const uPcid = (uint16_t)(uDesc.s.Lo & UINT64_C(0xfff)); /* PCID is 12 bits wide; uint8_t would truncate bits 11:8. */
6872 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6873 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
6874 switch (uInvpcidType)
6875 {
6876 case X86_INVPCID_TYPE_INDV_ADDR:
6877 {
6878 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6879 {
6880 Log(("invpcid: invalidation address %#RGv is not canonical -> #GP(0)\n", GCPtrInvAddr));
6881 return iemRaiseGeneralProtectionFault0(pVCpu);
6882 }
6883 if ( !(uCr4 & X86_CR4_PCIDE)
6884 && uPcid != 0)
6885 {
6886 Log(("invpcid: invalid pcid %#x\n", uPcid));
6887 return iemRaiseGeneralProtectionFault0(pVCpu);
6888 }
6889
6890 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
6891 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6892 break;
6893 }
6894
6895 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6896 {
6897 if ( !(uCr4 & X86_CR4_PCIDE)
6898 && uPcid != 0)
6899 {
6900 Log(("invpcid: invalid pcid %#x\n", uPcid));
6901 return iemRaiseGeneralProtectionFault0(pVCpu);
6902 }
6903 /* Invalidate all mappings associated with PCID except global translations. */
6904 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6905 break;
6906 }
6907
6908 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
6909 {
6910 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
6911 break;
6912 }
6913
6914 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
6915 {
6916 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6917 break;
6918 }
6919 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6920 }
6921 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6922 }
6923 return rcStrict;
6924}
6925
6926
6927/**
6928 * Implements INVD.
6929 */
6930IEM_CIMPL_DEF_0(iemCImpl_invd)
6931{
6932 if (pVCpu->iem.s.uCpl != 0)
6933 {
6934 Log(("invd: CPL != 0 -> #GP(0)\n"));
6935 return iemRaiseGeneralProtectionFault0(pVCpu);
6936 }
6937
6938 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6939 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
6940
6941 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
6942
6943 /* We currently take no action here. */
6944 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6945}
6946
6947
6948/**
6949 * Implements WBINVD.
6950 */
6951IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
6952{
6953 if (pVCpu->iem.s.uCpl != 0)
6954 {
6955 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
6956 return iemRaiseGeneralProtectionFault0(pVCpu);
6957 }
6958
6959 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6960 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
6961
6962 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
6963
6964 /* We currently take no action here. */
6965 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6966}
6967
6968
6969/** Opcode 0x0f 0xaa. */
6970IEM_CIMPL_DEF_0(iemCImpl_rsm)
6971{
6972 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
6973 NOREF(cbInstr);
6974 return iemRaiseUndefinedOpcode(pVCpu);
6975}
6976
6977
6978/**
6979 * Implements RDTSC.
6980 */
6981IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
6982{
6983 /*
6984 * Check preconditions.
6985 */
6986 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
6987 return iemRaiseUndefinedOpcode(pVCpu);
6988
6989 if (pVCpu->iem.s.uCpl != 0)
6990 {
6991 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6992 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6993 {
6994 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6995 return iemRaiseGeneralProtectionFault0(pVCpu);
6996 }
6997 }
6998
6999 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7000 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7001 {
7002 Log(("rdtsc: Guest intercept -> VM-exit\n"));
7003 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
7004 }
7005
7006 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
7007 {
7008 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
7009 IEM_SVM_UPDATE_NRIP(pVCpu);
7010 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7011 }
7012
7013 /*
7014 * Do the job.
7015 */
7016 uint64_t uTicks = TMCpuTickGet(pVCpu);
7017#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7018 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7019#endif
7020 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7021 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7022 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
7023 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7024}
7025
7026
7027/**
7028 * Implements RDTSCP.
7029 */
7030IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
7031{
7032 /*
7033 * Check preconditions.
7034 */
7035 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
7036 return iemRaiseUndefinedOpcode(pVCpu);
7037
7038 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7039 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP))
7040 {
7041 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
7042 return iemRaiseUndefinedOpcode(pVCpu);
7043 }
7044
7045 if (pVCpu->iem.s.uCpl != 0)
7046 {
7047 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7048 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7049 {
7050 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7051 return iemRaiseGeneralProtectionFault0(pVCpu);
7052 }
7053 }
7054
7055 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7056 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7057 {
7058 Log(("rdtscp: Guest intercept -> VM-exit\n"));
7059 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
7060 }
7061 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
7062 {
7063 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
7064 IEM_SVM_UPDATE_NRIP(pVCpu);
7065 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7066 }
7067
7068 /*
7069 * Do the job.
7070 * Query the MSR first in case of trips to ring-3.
7071 */
7072 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
7073 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
7074 if (rcStrict == VINF_SUCCESS)
7075 {
7076 /* Low dword of the TSC_AUX msr only. */
7077 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7078
7079 uint64_t uTicks = TMCpuTickGet(pVCpu);
7080#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7081 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7082#endif
7083 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7084 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7085 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
7086 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7087 }
7088 return rcStrict;
7089}
7090
7091
7092/**
7093 * Implements RDPMC.
7094 */
7095IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
7096{
7097 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7098
7099 if ( pVCpu->iem.s.uCpl != 0
7100 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7101 return iemRaiseGeneralProtectionFault0(pVCpu);
7102
7103 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7104 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7105 {
7106 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7107 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7108 }
7109
7110 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7111 {
7112 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7113 IEM_SVM_UPDATE_NRIP(pVCpu);
7114 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7115 }
7116
7117 /** @todo Emulate performance counters, for now just return 0. */
7118 pVCpu->cpum.GstCtx.rax = 0;
7119 pVCpu->cpum.GstCtx.rdx = 0;
7120 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7121 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7122 * ecx but see @bugref{3472}! */
7123
7124 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7125}
7126
7127
7128/**
7129 * Implements RDMSR.
7130 */
7131IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7132{
7133 /*
7134 * Check preconditions.
7135 */
7136 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7137 return iemRaiseUndefinedOpcode(pVCpu);
7138 if (pVCpu->iem.s.uCpl != 0)
7139 return iemRaiseGeneralProtectionFault0(pVCpu);
7140
7141 /*
7142 * Check nested-guest intercepts.
7143 */
7144#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7145 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7146 {
7147 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7148 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7149 }
7150#endif
7151
7152#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7153 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7154 {
7155 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */);
7156 if (rcStrict == VINF_SVM_VMEXIT)
7157 return VINF_SUCCESS;
7158 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7159 {
7160 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7161 return rcStrict;
7162 }
7163 }
7164#endif
7165
7166 /*
7167 * Do the job.
7168 */
7169 RTUINT64U uValue;
7170 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7171 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7172
7173 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7174 if (rcStrict == VINF_SUCCESS)
7175 {
7176 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7177 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7178 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7179
7180 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7181 }
7182
7183#ifndef IN_RING3
7184 /* Deferred to ring-3. */
7185 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7186 {
7187 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7188 return rcStrict;
7189 }
7190#endif
7191
7192 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7193 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7194 {
7195 pVCpu->iem.s.cLogRelRdMsr++;
7196 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7197 }
7198 else
7199 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7200 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7201 return iemRaiseGeneralProtectionFault0(pVCpu);
7202}
7203
7204
7205/**
7206 * Implements WRMSR.
7207 */
7208IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7209{
7210 /*
7211 * Check preconditions.
7212 */
7213 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7214 return iemRaiseUndefinedOpcode(pVCpu);
7215 if (pVCpu->iem.s.uCpl != 0)
7216 return iemRaiseGeneralProtectionFault0(pVCpu);
7217
7218 RTUINT64U uValue;
7219 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7220 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7221
7222 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7223
7224 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7225 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7226
7227 /*
7228 * Check nested-guest intercepts.
7229 */
7230#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7231 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7232 {
7233 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7234 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7235 }
7236#endif
7237
7238#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7239 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7240 {
7241 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */);
7242 if (rcStrict == VINF_SVM_VMEXIT)
7243 return VINF_SUCCESS;
7244 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7245 {
7246 Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7247 return rcStrict;
7248 }
7249 }
7250#endif
7251
7252 /*
7253 * Do the job.
7254 */
7255 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7256 if (rcStrict == VINF_SUCCESS)
7257 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7258
7259#ifndef IN_RING3
7260 /* Deferred to ring-3. */
7261 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7262 {
7263 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7264 return rcStrict;
7265 }
7266#endif
7267
7268 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7269 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7270 {
7271 pVCpu->iem.s.cLogRelWrMsr++;
7272 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7273 }
7274 else
7275 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7276 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7277 return iemRaiseGeneralProtectionFault0(pVCpu);
7278}
7279
7280
7281/**
7282 * Implements 'IN eAX, port'.
7283 *
7284 * @param u16Port The source port.
7285 * @param fImm Whether the port was specified through an immediate operand
7286 * or the implicit DX register.
7287 * @param cbReg The register size.
7288 */
7289IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
7290{
7291 /*
7292 * CPL check
7293 */
7294 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7295 if (rcStrict != VINF_SUCCESS)
7296 return rcStrict;
7297
7298 /*
7299 * Check VMX nested-guest IO intercept.
7300 */
7301#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7302 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7303 {
7304 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, fImm, cbReg, cbInstr);
7305 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7306 return rcStrict;
7307 }
7308#else
7309 RT_NOREF(fImm);
7310#endif
7311
7312 /*
7313 * Check SVM nested-guest IO intercept.
7314 */
7315#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7316 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7317 {
7318 uint8_t cAddrSizeBits;
7319 switch (pVCpu->iem.s.enmEffAddrMode)
7320 {
7321 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7322 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7323 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7324 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7325 }
7326 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7327 false /* fRep */, false /* fStrIo */, cbInstr);
7328 if (rcStrict == VINF_SVM_VMEXIT)
7329 return VINF_SUCCESS;
7330 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7331 {
7332 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7333 VBOXSTRICTRC_VAL(rcStrict)));
7334 return rcStrict;
7335 }
7336 }
7337#endif
7338
7339 /*
7340 * Perform the I/O.
7341 */
7342 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7343 uint32_t u32Value = 0;
7344 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, cbReg);
7345 if (IOM_SUCCESS(rcStrict))
7346 {
7347 switch (cbReg)
7348 {
7349 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7350 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7351 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7352 default: AssertFailedReturn(VERR_IEM_IPE_3);
7353 }
7354
7355 pVCpu->iem.s.cPotentialExits++;
7356 if (rcStrict != VINF_SUCCESS)
7357 iemSetPassUpStatus(pVCpu, rcStrict);
7358
7359 /*
7360 * Check for I/O breakpoints before we complete the instruction.
7361 */
7362 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7363 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7364 && X86_DR7_ANY_RW_IO(fDr7)
7365 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7366 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7367 && rcStrict == VINF_SUCCESS))
7368 {
7369 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7370 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7371 }
7372
7373 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7374 }
7375
7376 return rcStrict;
7377}
7378
7379
7380/**
7381 * Implements 'IN eAX, DX'.
7382 *
7383 * @param cbReg The register size.
7384 */
7385IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
7386{
7387 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7388}
7389
7390
7391/**
7392 * Implements 'OUT port, eAX'.
7393 *
7394 * @param u16Port The destination port.
7395 * @param fImm Whether the port was specified through an immediate operand
7396 * or the implicit DX register.
7397 * @param cbReg The register size.
7398 */
7399IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
7400{
7401 /*
7402 * CPL check
7403 */
7404 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7405 if (rcStrict != VINF_SUCCESS)
7406 return rcStrict;
7407
7408 /*
7409 * Check VMX nested-guest I/O intercept.
7410 */
7411#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7412 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7413 {
7414 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, fImm, cbReg, cbInstr);
7415 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7416 return rcStrict;
7417 }
7418#else
7419 RT_NOREF(fImm);
7420#endif
7421
7422 /*
7423 * Check SVM nested-guest I/O intercept.
7424 */
7425#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7426 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7427 {
7428 uint8_t cAddrSizeBits;
7429 switch (pVCpu->iem.s.enmEffAddrMode)
7430 {
7431 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7432 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7433 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7434 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7435 }
7436 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7437 false /* fRep */, false /* fStrIo */, cbInstr);
7438 if (rcStrict == VINF_SVM_VMEXIT)
7439 return VINF_SUCCESS;
7440 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7441 {
7442 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7443 VBOXSTRICTRC_VAL(rcStrict)));
7444 return rcStrict;
7445 }
7446 }
7447#endif
7448
7449 /*
7450 * Perform the I/O.
7451 */
7452 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7453 uint32_t u32Value;
7454 switch (cbReg)
7455 {
7456 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7457 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7458 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7459 default: AssertFailedReturn(VERR_IEM_IPE_4);
7460 }
7461 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, cbReg);
7462 if (IOM_SUCCESS(rcStrict))
7463 {
7464 pVCpu->iem.s.cPotentialExits++;
7465 if (rcStrict != VINF_SUCCESS)
7466 iemSetPassUpStatus(pVCpu, rcStrict);
7467
7468 /*
7469 * Check for I/O breakpoints before we complete the instruction.
7470 */
7471 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7472 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7473 && X86_DR7_ANY_RW_IO(fDr7)
7474 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7475 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7476 && rcStrict == VINF_SUCCESS))
7477 {
7478 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7479 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7480 }
7481
7482 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7483 }
7484 return rcStrict;
7485}
7486
7487
7488/**
7489 * Implements 'OUT DX, eAX'.
7490 *
7491 * @param cbReg The register size.
7492 */
7493IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
7494{
7495 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7496}
7497
7498
7499/**
7500 * Implements 'CLI'.
7501 */
7502IEM_CIMPL_DEF_0(iemCImpl_cli)
7503{
7504 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7505#ifdef LOG_ENABLED
7506 uint32_t const fEflOld = fEfl;
7507#endif
7508
7509 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
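    /* Protected mode: CPL <= IOPL clears IF; CPL 3 with CR4.PVI clears VIF; otherwise #GP.
       V8086 mode: IOPL 3 clears IF; IOPL < 3 with CR4.VME clears VIF; otherwise #GP.
       Real mode: IF is always cleared. */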
7510 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7511 {
7512 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7513 if (!(fEfl & X86_EFL_VM))
7514 {
7515 if (pVCpu->iem.s.uCpl <= uIopl)
7516 fEfl &= ~X86_EFL_IF;
7517 else if ( pVCpu->iem.s.uCpl == 3
7518 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7519 fEfl &= ~X86_EFL_VIF;
7520 else
7521 return iemRaiseGeneralProtectionFault0(pVCpu);
7522 }
7523 /* V8086 */
7524 else if (uIopl == 3)
7525 fEfl &= ~X86_EFL_IF;
7526 else if ( uIopl < 3
7527 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7528 fEfl &= ~X86_EFL_VIF;
7529 else
7530 return iemRaiseGeneralProtectionFault0(pVCpu);
7531 }
7532 /* real mode */
7533 else
7534 fEfl &= ~X86_EFL_IF;
7535
7536 /* Commit. */
7537 IEMMISC_SET_EFL(pVCpu, fEfl);
7538 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7539 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl));
7540 return rcStrict;
7541}
7542
7543
7544/**
7545 * Implements 'STI'.
7546 */
7547IEM_CIMPL_DEF_0(iemCImpl_sti)
7548{
7549 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7550 uint32_t const fEflOld = fEfl;
7551
7552 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7553 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7554 {
7555 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7556 if (!(fEfl & X86_EFL_VM))
7557 {
7558 if (pVCpu->iem.s.uCpl <= uIopl)
7559 fEfl |= X86_EFL_IF;
7560 else if ( pVCpu->iem.s.uCpl == 3
7561 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7562 && !(fEfl & X86_EFL_VIP) )
7563 fEfl |= X86_EFL_VIF;
7564 else
7565 return iemRaiseGeneralProtectionFault0(pVCpu);
7566 }
7567 /* V8086 */
7568 else if (uIopl == 3)
7569 fEfl |= X86_EFL_IF;
7570 else if ( uIopl < 3
7571 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7572 && !(fEfl & X86_EFL_VIP) )
7573 fEfl |= X86_EFL_VIF;
7574 else
7575 return iemRaiseGeneralProtectionFault0(pVCpu);
7576 }
7577 /* real mode */
7578 else
7579 fEfl |= X86_EFL_IF;
7580
7581 /*
7582 * Commit.
7583 *
7584 * Note! Setting the shadow interrupt flag must be done after RIP updating.
7585 */
7586 IEMMISC_SET_EFL(pVCpu, fEfl);
7587 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7588 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7589 {
7590 /** @todo only set the shadow flag if it was clear before? */
7591 CPUMSetInInterruptShadowSti(&pVCpu->cpum.GstCtx);
7592 }
7593 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7594 return rcStrict;
7595}
7596
7597
7598/**
7599 * Implements 'HLT'.
7600 */
7601IEM_CIMPL_DEF_0(iemCImpl_hlt)
7602{
7603 if (pVCpu->iem.s.uCpl != 0)
7604 return iemRaiseGeneralProtectionFault0(pVCpu);
7605
7606 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7607 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7608 {
7609 Log2(("hlt: Guest intercept -> VM-exit\n"));
7610 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7611 }
7612
7613 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7614 {
7615 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7616 IEM_SVM_UPDATE_NRIP(pVCpu);
7617 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7618 }
7619
7620 /** @todo finish: This ASSUMES that iemRegAddToRipAndFinishingClearingRF won't
7621 * be returning any status codes relating to non-guest events being raised, as
7622 * we'll mess up the guest HALT otherwise. */
7623 VBOXSTRICTRC rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7624 if (rcStrict == VINF_SUCCESS)
7625 rcStrict = VINF_EM_HALT;
7626 return rcStrict;
7627}
7628
7629
7630/**
7631 * Implements 'MONITOR'.
7632 */
7633IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7634{
7635 /*
7636 * Permission checks.
7637 */
7638 if (pVCpu->iem.s.uCpl != 0)
7639 {
7640 Log2(("monitor: CPL != 0\n"));
7641 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7642 }
7643 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7644 {
7645 Log2(("monitor: Not in CPUID\n"));
7646 return iemRaiseUndefinedOpcode(pVCpu);
7647 }
7648
7649 /*
7650 * Check VMX guest-intercept.
7651 * This should be considered a fault-like VM-exit.
7652 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7653 */
7654 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7655 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7656 {
7657 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7658 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7659 }
7660
7661 /*
7662 * Gather the operands and validate them.
7663 */
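    /* RAX/EAX = linear address to monitor, ECX = extensions (must be zero), EDX = hints. */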
7664 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7665 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7666 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7667/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7668 * \#GP first. */
7669 if (uEcx != 0)
7670 {
7671 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7672 return iemRaiseGeneralProtectionFault0(pVCpu);
7673 }
7674
7675 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7676 if (rcStrict != VINF_SUCCESS)
7677 return rcStrict;
7678
7679 RTGCPHYS GCPhysMem;
7680 /** @todo access size */
7681 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7682 if (rcStrict != VINF_SUCCESS)
7683 return rcStrict;
7684
7685#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7686 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7687 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7688 {
7689 /*
7690 * MONITOR does not access the memory, just monitors the address. However,
7691 * if the address falls in the APIC-access page, the address monitored must
7692 * instead be the corresponding address in the virtual-APIC page.
7693 *
7694 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7695 */
7696 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7697 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7698 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7699 return rcStrict;
7700 }
7701#endif
7702
7703 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7704 {
7705 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7706 IEM_SVM_UPDATE_NRIP(pVCpu);
7707 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7708 }
7709
7710 /*
7711 * Call EM to prepare the monitor/wait.
7712 */
7713 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7714 Assert(rcStrict == VINF_SUCCESS);
7715 if (rcStrict == VINF_SUCCESS)
7716 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7717 return rcStrict;
7718}
7719
7720
7721/**
7722 * Implements 'MWAIT'.
7723 */
7724IEM_CIMPL_DEF_0(iemCImpl_mwait)
7725{
7726 /*
7727 * Permission checks.
7728 */
7729 if (pVCpu->iem.s.uCpl != 0)
7730 {
7731 Log2(("mwait: CPL != 0\n"));
7732 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7733 * EFLAGS.VM then.) */
7734 return iemRaiseUndefinedOpcode(pVCpu);
7735 }
7736 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7737 {
7738 Log2(("mwait: Not in CPUID\n"));
7739 return iemRaiseUndefinedOpcode(pVCpu);
7740 }
7741
7742 /* Check VMX nested-guest intercept. */
7743 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7744 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7745 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7746
7747 /*
7748 * Gather the operands and validate them.
7749 */
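    /* EAX = hints, ECX = extensions; ECX bit 0 requests waking on interrupts even when IF is clear. */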
7750 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7751 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7752 if (uEcx != 0)
7753 {
7754 /* Only supported extension is break on IRQ when IF=0. */
7755 if (uEcx > 1)
7756 {
7757 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7758 return iemRaiseGeneralProtectionFault0(pVCpu);
7759 }
7760 uint32_t fMWaitFeatures = 0;
7761 uint32_t uIgnore = 0;
7762 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7763 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7764 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7765 {
7766 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7767 return iemRaiseGeneralProtectionFault0(pVCpu);
7768 }
7769
7770#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7771 /*
7772 * If the interrupt-window exiting control is set, or a virtual interrupt is
7773 * pending delivery, and interrupts are disabled, the processor does not enter
7774 * its mwait state but rather passes control to the next instruction.
7775 *
7776 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7777 */
7778 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7779 && !pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7780 {
7781 if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7782 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7783 /** @todo finish: check this out after we move the interrupt-window stuff out
7784 * of the run loop and into the instruction finishing logic here. */
7785 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7786 }
7787#endif
7788 }
7789
7790 /*
7791 * Check SVM nested-guest mwait intercepts.
7792 */
7793 if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7794 && EMMonitorIsArmed(pVCpu))
7795 {
7796 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7797 IEM_SVM_UPDATE_NRIP(pVCpu);
7798 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7799 }
7800 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7801 {
7802 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7803 IEM_SVM_UPDATE_NRIP(pVCpu);
7804 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7805 }
7806
7807 /*
7808 * Call EM to prepare the monitor/wait.
7809 *
7810 * This will return VINF_EM_HALT. If there the trap flag is set, we may
7811 * override it when executing iemRegAddToRipAndFinishingClearingRF ASSUMING
7812 * that will only return guest related events.
7813 */
7814 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7815
7816 /** @todo finish: This needs more thinking as we should suppress internal
7817 * debugger events here, or we'll bugger up the guest state even more than we
7818 * already do around VINF_EM_HALT. */
7819 VBOXSTRICTRC rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7820 if (rcStrict2 != VINF_SUCCESS)
7821 {
7822 Log2(("mwait: %Rrc (perform) -> %Rrc (finish)!\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2) ));
7823 rcStrict = rcStrict2;
7824 }
7825
7826 return rcStrict;
7827}
7828
7829
7830/**
7831 * Implements 'SWAPGS'.
7832 */
7833IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7834{
7835 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
7836
7837 /*
7838 * Permission checks.
7839 */
7840 if (pVCpu->iem.s.uCpl != 0)
7841 {
7842 Log2(("swapgs: CPL != 0\n"));
7843 return iemRaiseUndefinedOpcode(pVCpu);
7844 }
7845
7846 /*
7847 * Do the job.
7848 */
7849 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7850 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7851 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7852 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7853
7854 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7855}
7856
7857
7858#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7859/**
7860 * Handles a CPUID call.
7861 */
7862static VBOXSTRICTRC iemCpuIdVBoxCall(PVMCPUCC pVCpu, uint32_t iFunction,
7863 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
7864{
7865 switch (iFunction)
7866 {
7867 case VBOX_CPUID_FN_ID:
7868 LogFlow(("iemCpuIdVBoxCall: VBOX_CPUID_FN_ID\n"));
7869 *pEax = VBOX_CPUID_RESP_ID_EAX;
7870 *pEbx = VBOX_CPUID_RESP_ID_EBX;
7871 *pEcx = VBOX_CPUID_RESP_ID_ECX;
7872 *pEdx = VBOX_CPUID_RESP_ID_EDX;
7873 break;
7874
7875 case VBOX_CPUID_FN_LOG:
7876 {
7877 CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX | CPUMCTX_EXTRN_RSI
7878 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7879
7880 /* Validate input. */
7881 uint32_t cchToLog = *pEdx;
7882 if (cchToLog <= _2M)
7883 {
7884 uint32_t const uLogPicker = *pEbx;
7885 if (uLogPicker <= 1)
7886 {
7887 /* Resolve the logger. */
7888 PRTLOGGER const pLogger = !uLogPicker
7889 ? RTLogDefaultInstanceEx(UINT32_MAX) : RTLogRelGetDefaultInstanceEx(UINT32_MAX);
7890 if (pLogger)
7891 {
7892 /* Copy over the data: */
7893 RTGCPTR GCPtrSrc = pVCpu->cpum.GstCtx.rsi;
7894 while (cchToLog > 0)
7895 {
7896 uint32_t cbToMap = GUEST_PAGE_SIZE - (GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
7897 if (cbToMap > cchToLog)
7898 cbToMap = cchToLog;
7899 /** @todo Extend iemMemMap to allow page-sized accesses and avoid the 7
7900 * unnecessary calls & iterations per page. */
7901 if (cbToMap > 512)
7902 cbToMap = 512;
7903 void *pvSrc = NULL;
7904 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, cbToMap, UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
7905 if (rcStrict == VINF_SUCCESS)
7906 {
7907 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
7908 rcStrict = iemMemCommitAndUnmap(pVCpu, pvSrc, IEM_ACCESS_DATA_R);
7909 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7910 }
7911 else
7912 {
7913 Log(("iemCpuIdVBoxCall: %Rrc at %RGp LB %#x\n", VBOXSTRICTRC_VAL(rcStrict), GCPtrSrc, cbToMap));
7914 return rcStrict;
7915 }
7916
7917 /* Advance. */
7918 pVCpu->cpum.GstCtx.rsi = GCPtrSrc += cbToMap;
7919 *pEdx = cchToLog -= cbToMap;
7920 }
7921 *pEax = VINF_SUCCESS;
7922 }
7923 else
7924 *pEax = (uint32_t)VERR_NOT_FOUND;
7925 }
7926 else
7927 *pEax = (uint32_t)VERR_NOT_FOUND;
7928 }
7929 else
7930 *pEax = (uint32_t)VERR_TOO_MUCH_DATA;
7931 *pEdx = VBOX_CPUID_RESP_GEN_EDX;
7932 *pEcx = VBOX_CPUID_RESP_GEN_ECX;
7933 *pEbx = VBOX_CPUID_RESP_GEN_EBX;
7934 break;
7935 }
7936
7937 default:
7938 LogFlow(("iemCpuIdVBoxCall: Invalid function %#x (%#x, %#x)\n", iFunction, *pEbx, *pEdx));
7939 *pEax = (uint32_t)VERR_INVALID_FUNCTION;
7940 *pEbx = (uint32_t)VERR_INVALID_FUNCTION;
7941 *pEcx = (uint32_t)VERR_INVALID_FUNCTION;
7942 *pEdx = (uint32_t)VERR_INVALID_FUNCTION;
7943 break;
7944 }
7945 return VINF_SUCCESS;
7946}
7947#endif /* VBOX_WITHOUT_CPUID_HOST_CALL */
7948
7949/**
7950 * Implements 'CPUID'.
7951 */
7952IEM_CIMPL_DEF_0(iemCImpl_cpuid)
7953{
7954 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7955 {
7956 Log2(("cpuid: Guest intercept -> VM-exit\n"));
7957 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
7958 }
7959
7960 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
7961 {
7962 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
7963 IEM_SVM_UPDATE_NRIP(pVCpu);
7964 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7965 }
7966
7967
7968 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7969 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7970
7971#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7972 /*
7973 * CPUID host call backdoor.
7974 */
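    /* The guest invokes this by issuing CPUID with the fixed request value in EAX and the
       fixed marker bits plus a function number in ECX (e.g. VBOX_CPUID_FN_LOG takes the string
       address in RSI, its length in EDX and the logger selector in EBX). Only honoured when
       fCpuIdHostCall is enabled for the VM. */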
7975 if ( uEax == VBOX_CPUID_REQ_EAX_FIXED
7976 && (uEcx & VBOX_CPUID_REQ_ECX_FIXED_MASK) == VBOX_CPUID_REQ_ECX_FIXED
7977 && pVCpu->CTX_SUFF(pVM)->iem.s.fCpuIdHostCall)
7978 {
7979 VBOXSTRICTRC rcStrict = iemCpuIdVBoxCall(pVCpu, uEcx & VBOX_CPUID_REQ_ECX_FN_MASK,
7980 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx,
7981 &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
7982 if (rcStrict != VINF_SUCCESS)
7983 return rcStrict;
7984 }
7985 /*
7986 * Regular CPUID.
7987 */
7988 else
7989#endif
7990 CPUMGetGuestCpuId(pVCpu, uEax, uEcx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
7991 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
7992
7993 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
7994 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
7995 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7996 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
7997 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
7998
7999 pVCpu->iem.s.cPotentialExits++;
8000 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8001}
8002
8003
8004/**
8005 * Implements 'AAD'.
8006 *
8007 * @param bImm The immediate operand.
8008 */
8009IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
8010{
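    /* AAD: AL := (AL + AH * imm8) & 0xff; AH := 0 (the 16-bit store to AX below clears AH). */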
8011 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8012 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
8013 pVCpu->cpum.GstCtx.ax = al;
8014 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8015 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8016 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8017
8018 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8019}
8020
8021
8022/**
8023 * Implements 'AAM'.
8024 *
8025 * @param bImm The immediate operand. Cannot be 0.
8026 */
8027IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
8028{
8029 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
8030
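    /* AAM: AH := AL / imm8; AL := AL % imm8; SF/ZF/PF are set from the new AL. */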
8031 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8032 uint8_t const al = (uint8_t)ax % bImm;
8033 uint8_t const ah = (uint8_t)ax / bImm;
8034 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
8035 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8036 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8037 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8038
8039 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8040}
8041
8042
8043/**
8044 * Implements 'DAA'.
8045 */
8046IEM_CIMPL_DEF_0(iemCImpl_daa)
8047{
8048 uint8_t const al = pVCpu->cpum.GstCtx.al;
8049 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8050
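    /* Decimal adjust after addition: add 6 to AL if the low nibble overflowed (AF set or > 9),
       and add 0x60 (setting CF) if the original AL was above 0x99 or CF was set. */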
8051 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8052 || (al & 0xf) >= 10)
8053 {
8054 pVCpu->cpum.GstCtx.al = al + 6;
8055 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8056 }
8057 else
8058 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8059
8060 if (al >= 0x9a || fCarry)
8061 {
8062 pVCpu->cpum.GstCtx.al += 0x60;
8063 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8064 }
8065 else
8066 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8067
8068 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8069 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8070}
8071
8072
8073/**
8074 * Implements 'DAS'.
8075 */
8076IEM_CIMPL_DEF_0(iemCImpl_das)
8077{
8078 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
8079 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8080
8081 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8082 || (uInputAL & 0xf) >= 10)
8083 {
8084 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8085 if (uInputAL < 6)
8086 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8087 pVCpu->cpum.GstCtx.al = uInputAL - 6;
8088 }
8089 else
8090 {
8091 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8092 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8093 }
8094
8095 if (uInputAL >= 0x9a || fCarry)
8096 {
8097 pVCpu->cpum.GstCtx.al -= 0x60;
8098 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8099 }
8100
8101 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8102 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8103}
8104
8105
8106/**
8107 * Implements 'AAA'.
8108 */
8109IEM_CIMPL_DEF_0(iemCImpl_aaa)
8110{
8111 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8112 {
8113 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8114 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8115 {
8116 iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8117 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8118 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8119 }
8120 else
8121 {
8122 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8123 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8124 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8125 }
8126 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8127 }
8128 else
8129 {
8130 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8131 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8132 {
8133 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
8134 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8135 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8136 }
8137 else
8138 {
8139 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8140 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8141 }
8142 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8143 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8144 }
8145
8146 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8147}
8148
8149
8150/**
8151 * Implements 'AAS'.
8152 */
8153IEM_CIMPL_DEF_0(iemCImpl_aas)
8154{
8155 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8156 {
8157 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8158 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8159 {
8160 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8161 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8162 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8163 }
8164 else
8165 {
8166 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8167 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8168 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8169 }
8170 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8171 }
8172 else
8173 {
8174 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8175 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8176 {
8177 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
8178 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8179 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8180 }
8181 else
8182 {
8183 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8184 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8185 }
8186 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8187 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8188 }
8189
8190 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8191}
8192
8193
8194/**
8195 * Implements the 16-bit version of 'BOUND'.
8196 *
8197 * @note We have separate 16-bit and 32-bit variants of this function due to
8198 *       the decoder using unsigned parameters, whereas we want signed ones to
8199 * do the job. This is significant for a recompiler.
8200 */
8201IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
8202{
8203 /*
8204 * Check if the index is inside the bounds, otherwise raise #BR.
8205 */
8206 if ( idxArray >= idxLowerBound
8207 && idxArray <= idxUpperBound)
8208 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8209 return iemRaiseBoundRangeExceeded(pVCpu);
8210}
8211
8212
8213/**
8214 * Implements the 32-bit version of 'BOUND'.
8215 */
8216IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8217{
8218 /*
8219 * Check if the index is inside the bounds, otherwise raise #BR.
8220 */
8221 if ( idxArray >= idxLowerBound
8222 && idxArray <= idxUpperBound)
8223 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8224 return iemRaiseBoundRangeExceeded(pVCpu);
8225}
8226
8227
8228
8229/*
8230 * Instantiate the various string operation combinations.
8231 */
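/* Each include below instantiates the string instruction workers for one
   OP_SIZE/ADDR_SIZE combination.  The 64-bit operand size with 16-bit
   addressing cannot be encoded, so that pair is omitted. */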
8232#define OP_SIZE 8
8233#define ADDR_SIZE 16
8234#include "IEMAllCImplStrInstr.cpp.h"
8235#define OP_SIZE 8
8236#define ADDR_SIZE 32
8237#include "IEMAllCImplStrInstr.cpp.h"
8238#define OP_SIZE 8
8239#define ADDR_SIZE 64
8240#include "IEMAllCImplStrInstr.cpp.h"
8241
8242#define OP_SIZE 16
8243#define ADDR_SIZE 16
8244#include "IEMAllCImplStrInstr.cpp.h"
8245#define OP_SIZE 16
8246#define ADDR_SIZE 32
8247#include "IEMAllCImplStrInstr.cpp.h"
8248#define OP_SIZE 16
8249#define ADDR_SIZE 64
8250#include "IEMAllCImplStrInstr.cpp.h"
8251
8252#define OP_SIZE 32
8253#define ADDR_SIZE 16
8254#include "IEMAllCImplStrInstr.cpp.h"
8255#define OP_SIZE 32
8256#define ADDR_SIZE 32
8257#include "IEMAllCImplStrInstr.cpp.h"
8258#define OP_SIZE 32
8259#define ADDR_SIZE 64
8260#include "IEMAllCImplStrInstr.cpp.h"
8261
8262#define OP_SIZE 64
8263#define ADDR_SIZE 32
8264#include "IEMAllCImplStrInstr.cpp.h"
8265#define OP_SIZE 64
8266#define ADDR_SIZE 64
8267#include "IEMAllCImplStrInstr.cpp.h"
8268
8269
8270/**
8271 * Implements 'XGETBV'.
8272 */
8273IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8274{
8275 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8276 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8277 {
8278 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8279 switch (uEcx)
8280 {
8281 case 0:
8282 break;
8283
8284 case 1: /** @todo Implement XCR1 support. */
8285 default:
8286 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8287 return iemRaiseGeneralProtectionFault0(pVCpu);
8288
8289 }
8290 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8291 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8292 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8293
8294 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8295 }
8296 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8297 return iemRaiseUndefinedOpcode(pVCpu);
8298}
8299
8300
8301/**
8302 * Implements 'XSETBV'.
8303 */
8304IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8305{
8306 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8307 {
8308 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8309 {
8310 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8311 IEM_SVM_UPDATE_NRIP(pVCpu);
8312 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8313 }
8314
8315 if (pVCpu->iem.s.uCpl == 0)
8316 {
8317 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8318
8319 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8320 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8321
8322 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8323 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8324 switch (uEcx)
8325 {
8326 case 0:
8327 {
8328 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8329 if (rc == VINF_SUCCESS)
8330 break;
8331 Assert(rc == VERR_CPUM_RAISE_GP_0);
8332 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8333 return iemRaiseGeneralProtectionFault0(pVCpu);
8334 }
8335
8336 case 1: /** @todo Implement XCR1 support. */
8337 default:
8338 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8339 return iemRaiseGeneralProtectionFault0(pVCpu);
8340
8341 }
8342
8343 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8344 }
8345
8346        Log(("xsetbv cpl=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8347 return iemRaiseGeneralProtectionFault0(pVCpu);
8348 }
8349 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8350 return iemRaiseUndefinedOpcode(pVCpu);
8351}
8352
8353#ifndef RT_ARCH_ARM64
8354# ifdef IN_RING3
8355
8356/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8357struct IEMCIMPLCX16ARGS
8358{
8359 PRTUINT128U pu128Dst;
8360 PRTUINT128U pu128RaxRdx;
8361 PRTUINT128U pu128RbxRcx;
8362 uint32_t *pEFlags;
8363# ifdef VBOX_STRICT
8364 uint32_t cCalls;
8365# endif
8366};
8367
8368/**
8369 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8370 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8371 */
8372static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8373{
8374 RT_NOREF(pVM, pVCpu);
8375 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8376# ifdef VBOX_STRICT
8377 Assert(pArgs->cCalls == 0);
8378 pArgs->cCalls++;
8379# endif
8380
8381 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8382 return VINF_SUCCESS;
8383}
8384
8385# endif /* IN_RING3 */
8386
8387/**
8388 * Implements 'CMPXCHG16B' fallback using rendezvous.
8389 */
8390IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8391 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
8392{
8393# ifdef IN_RING3
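    /* The ONCE rendezvous halts the other EMTs while the callback performs the
       non-atomic 128-bit compare-exchange, which is what gives the fallback the
       required atomicity. */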
8394 struct IEMCIMPLCX16ARGS Args;
8395 Args.pu128Dst = pu128Dst;
8396 Args.pu128RaxRdx = pu128RaxRdx;
8397 Args.pu128RbxRcx = pu128RbxRcx;
8398 Args.pEFlags = pEFlags;
8399# ifdef VBOX_STRICT
8400 Args.cCalls = 0;
8401# endif
8402 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8403 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8404 Assert(Args.cCalls == 1);
8405 if (rcStrict == VINF_SUCCESS)
8406 {
8407 /* Duplicated tail code. */
8408 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
8409 if (rcStrict == VINF_SUCCESS)
8410 {
8411 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8412 if (!(*pEFlags & X86_EFL_ZF))
8413 {
8414 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8415 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8416 }
8417 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8418 }
8419 }
8420 return rcStrict;
8421# else
8422 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
8423 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8424# endif
8425}
8426
8427#endif /* RT_ARCH_ARM64 */
8428
8429/**
8430 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8431 *
8432 * This is implemented in C because it triggers a load-like behaviour without
8433 * actually reading anything. Since that's not so common, it's implemented
8434 * here.
8435 *
8436 * @param iEffSeg The effective segment.
8437 * @param GCPtrEff The address of the image.
8438 */
8439IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8440{
8441 /*
8442 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8443 */
8444 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8445 if (rcStrict == VINF_SUCCESS)
8446 {
8447 RTGCPHYS GCPhysMem;
8448 /** @todo access size. */
8449 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8450 if (rcStrict == VINF_SUCCESS)
8451 {
8452#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8453 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8454 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8455 {
8456 /*
8457 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8458 * that contains the address. However, if the address falls in the APIC-access
8459 * page, the address flushed must instead be the corresponding address in the
8460 * virtual-APIC page.
8461 *
8462 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8463 */
8464 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8465 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8466 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8467 return rcStrict;
8468 }
8469#endif
8470 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8471 }
8472 }
8473
8474 return rcStrict;
8475}
8476
8477
8478/**
8479 * Implements 'FINIT' and 'FNINIT'.
8480 *
8481 * @param   fCheckXcpts     Whether to check for unmasked pending exceptions or
8482 * not.
8483 */
8484IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8485{
8486 /*
8487 * Exceptions.
8488 */
8489 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8490 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8491 return iemRaiseDeviceNotAvailable(pVCpu);
8492
8493 iemFpuActualizeStateForChange(pVCpu);
8494 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8495
8496 /* FINIT: Raise #MF on pending exception(s): */
8497 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8498 return iemRaiseMathFault(pVCpu);
8499
8500 /*
8501 * Reset the state.
8502 */
8503 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8504
8505 /* Rotate the stack to account for changed TOS. */
8506 iemFpuRotateStackSetTop(&pXState->x87, 0);
8507
8508 pXState->x87.FCW = 0x37f;
8509 pXState->x87.FSW = 0;
8510 pXState->x87.FTW = 0x00; /* 0 - empty. */
8511 /** @todo Intel says the instruction and data pointers are not cleared on
8512 *        387, presume that the 8087 and 287 don't do so either. */
8513 /** @todo test this stuff. */
8514 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8515 {
8516 pXState->x87.FPUDP = 0;
8517 pXState->x87.DS = 0; //??
8518 pXState->x87.Rsrvd2 = 0;
8519 pXState->x87.FPUIP = 0;
8520 pXState->x87.CS = 0; //??
8521 pXState->x87.Rsrvd1 = 0;
8522 }
8523 pXState->x87.FOP = 0;
8524
8525 iemHlpUsedFpu(pVCpu);
8526 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8527}
8528
8529
8530/**
8531 * Implements 'FXSAVE'.
8532 *
8533 * @param iEffSeg The effective segment.
8534 * @param GCPtrEff The address of the image.
8535 * @param enmEffOpSize The operand size (only REX.W really matters).
8536 */
8537IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8538{
8539 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8540
8541 /** @todo check out bugref{1529} and AMD behaviour */
8542
8543 /*
8544 * Raise exceptions.
8545 */
8546 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8547 return iemRaiseDeviceNotAvailable(pVCpu);
8548
8549 /*
8550 * Access the memory.
8551 */
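    /* The 512-byte FXSAVE image must be 16-byte aligned; the alignment mask of
       15 plus the ALIGN_GP/ALIGN_GP_OR_AC flags below turn misalignment into
       #GP (or #AC where applicable). */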
8552 void *pvMem512;
8553 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8554 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8555 if (rcStrict != VINF_SUCCESS)
8556 return rcStrict;
8557 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8558 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8559
8560 /*
8561 * Store the registers.
8562 */
8563    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
8564     * specific whether MXCSR and XMM0-XMM7 are saved. */
8565
8566 /* common for all formats */
8567 pDst->FCW = pSrc->FCW;
8568 pDst->FSW = pSrc->FSW;
8569 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8570 pDst->FOP = pSrc->FOP;
8571 pDst->MXCSR = pSrc->MXCSR;
8572 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8573 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8574 {
8575 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8576 * them for now... */
8577 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8578 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8579 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8580 pDst->aRegs[i].au32[3] = 0;
8581 }
8582
8583 /* FPU IP, CS, DP and DS. */
8584 pDst->FPUIP = pSrc->FPUIP;
8585 pDst->CS = pSrc->CS;
8586 pDst->FPUDP = pSrc->FPUDP;
8587 pDst->DS = pSrc->DS;
8588 if (enmEffOpSize == IEMMODE_64BIT)
8589 {
8590 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8591 pDst->Rsrvd1 = pSrc->Rsrvd1;
8592 pDst->Rsrvd2 = pSrc->Rsrvd2;
8593 }
8594 else
8595 {
8596 pDst->Rsrvd1 = 0;
8597 pDst->Rsrvd2 = 0;
8598 }
8599
8600 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set. */
8601 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8602 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8603 || pVCpu->iem.s.uCpl != 0)
8604 {
8605 uint32_t cXmmRegs = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? 16 : 8;
8606 for (uint32_t i = 0; i < cXmmRegs; i++)
8607 pDst->aXMM[i] = pSrc->aXMM[i];
8608 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8609 * right? */
8610 }
8611
8612 /*
8613 * Commit the memory.
8614 */
8615 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8616 if (rcStrict != VINF_SUCCESS)
8617 return rcStrict;
8618
8619 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8620}
8621
8622
8623/**
8624 * Implements 'FXRSTOR'.
8625 *
8626 * @param iEffSeg The effective segment register for @a GCPtrEff.
8627 * @param GCPtrEff The address of the image.
8628 * @param enmEffOpSize The operand size (only REX.W really matters).
8629 */
8630IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8631{
8632 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8633
8634 /** @todo check out bugref{1529} and AMD behaviour */
8635
8636 /*
8637 * Raise exceptions.
8638 */
8639 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8640 return iemRaiseDeviceNotAvailable(pVCpu);
8641
8642 /*
8643 * Access the memory.
8644 */
8645 void *pvMem512;
8646 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8647 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8648 if (rcStrict != VINF_SUCCESS)
8649 return rcStrict;
8650 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8651 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8652
8653 /*
8654 * Check the state for stuff which will #GP(0).
8655 */
8656 uint32_t const fMXCSR = pSrc->MXCSR;
8657 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8658 if (fMXCSR & ~fMXCSR_MASK)
8659 {
8660 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8661 return iemRaiseGeneralProtectionFault0(pVCpu);
8662 }
8663
8664 /*
8665 * Load the registers.
8666 */
8667    /** @todo CPU/VM detection possible! According to Intel, if CR4.OSFXSR=0 it's
8668     * implementation specific whether MXCSR and XMM0-XMM7 are
8669     * restored.
8670 * AMD says MXCSR and XMM registers are never loaded if
8671 * CR4.OSFXSR=0.
8672 */
8673
8674 /* common for all formats */
8675 pDst->FCW = pSrc->FCW;
8676 pDst->FSW = pSrc->FSW;
8677 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8678 pDst->FOP = pSrc->FOP;
8679 pDst->MXCSR = fMXCSR;
8680 /* (MXCSR_MASK is read-only) */
8681 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8682 {
8683 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8684 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8685 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8686 pDst->aRegs[i].au32[3] = 0;
8687 }
8688
8689 /* FPU IP, CS, DP and DS. */
8690 /** @todo AMD says this is only done if FSW.ES is set after loading. */
8691 if (enmEffOpSize == IEMMODE_64BIT)
8692 {
8693 pDst->FPUIP = pSrc->FPUIP;
8694 pDst->CS = pSrc->CS;
8695 pDst->Rsrvd1 = pSrc->Rsrvd1;
8696 pDst->FPUDP = pSrc->FPUDP;
8697 pDst->DS = pSrc->DS;
8698 pDst->Rsrvd2 = pSrc->Rsrvd2;
8699 }
8700 else
8701 {
8702 pDst->FPUIP = pSrc->FPUIP;
8703 pDst->CS = pSrc->CS;
8704 pDst->Rsrvd1 = 0;
8705 pDst->FPUDP = pSrc->FPUDP;
8706 pDst->DS = pSrc->DS;
8707 pDst->Rsrvd2 = 0;
8708 }
8709
8710 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set.
8711 * Does not affect MXCSR, only registers.
8712 */
8713 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8714 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8715 || pVCpu->iem.s.uCpl != 0)
8716 {
8717 uint32_t cXmmRegs = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? 16 : 8;
8718 for (uint32_t i = 0; i < cXmmRegs; i++)
8719 pDst->aXMM[i] = pSrc->aXMM[i];
8720 }
8721
8722 pDst->FCW &= ~X86_FCW_ZERO_MASK;
8723 iemFpuRecalcExceptionStatus(pDst);
8724
8725 if (pDst->FSW & X86_FSW_ES)
8726 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8727 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8728
8729 /*
8730 * Unmap the memory.
8731 */
8732 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8733 if (rcStrict != VINF_SUCCESS)
8734 return rcStrict;
8735
8736 iemHlpUsedFpu(pVCpu);
8737 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8738}
8739
8740
8741/**
8742 * Implements 'XSAVE'.
8743 *
8744 * @param iEffSeg The effective segment.
8745 * @param GCPtrEff The address of the image.
8746 * @param enmEffOpSize The operand size (only REX.W really matters).
8747 */
8748IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8749{
8750 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8751
8752 /*
8753 * Raise exceptions.
8754 */
8755 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8756 return iemRaiseUndefinedOpcode(pVCpu);
8757 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8758 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8759 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8760 {
8761        Log(("xsave: Not enabled for nested-guest execution -> #UD\n"));
8762 return iemRaiseUndefinedOpcode(pVCpu);
8763 }
8764 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8765 return iemRaiseDeviceNotAvailable(pVCpu);
8766
8767 /*
8768 * Calc the requested mask.
8769 */
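    /* RFBM = (EDX:EAX) & XCR0; only the x87, SSE and YMM-hi (AVX) components
       are implemented here, which the assertion below enforces. */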
8770 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8771 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8772 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8773
8774/** @todo figure out the exact protocol for the memory access. Currently we
8775 * just need this crap to work halfway to make it possible to test
8776 * AVX instructions. */
8777/** @todo figure out the XINUSE and XMODIFIED */
8778
8779 /*
8780 * Access the x87 memory state.
8781 */
8782 /* The x87+SSE state. */
8783 void *pvMem512;
8784 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8785 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8786 if (rcStrict != VINF_SUCCESS)
8787 return rcStrict;
8788 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8789 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8790
8791 /* The header. */
8792 PX86XSAVEHDR pHdr;
8793    rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
8794 if (rcStrict != VINF_SUCCESS)
8795 return rcStrict;
8796
8797 /*
8798 * Store the X87 state.
8799 */
8800 if (fReqComponents & XSAVE_C_X87)
8801 {
8802 /* common for all formats */
8803 pDst->FCW = pSrc->FCW;
8804 pDst->FSW = pSrc->FSW;
8805 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8806 pDst->FOP = pSrc->FOP;
8807 pDst->FPUIP = pSrc->FPUIP;
8808 pDst->CS = pSrc->CS;
8809 pDst->FPUDP = pSrc->FPUDP;
8810 pDst->DS = pSrc->DS;
8811 if (enmEffOpSize == IEMMODE_64BIT)
8812 {
8813 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8814 pDst->Rsrvd1 = pSrc->Rsrvd1;
8815 pDst->Rsrvd2 = pSrc->Rsrvd2;
8816 }
8817 else
8818 {
8819 pDst->Rsrvd1 = 0;
8820 pDst->Rsrvd2 = 0;
8821 }
8822 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8823 {
8824 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8825 * them for now... */
8826 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8827 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8828 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8829 pDst->aRegs[i].au32[3] = 0;
8830 }
8831
8832 }
8833
8834 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8835 {
8836 pDst->MXCSR = pSrc->MXCSR;
8837 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8838 }
8839
8840 if (fReqComponents & XSAVE_C_SSE)
8841 {
8842 /* XMM registers. */
8843 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8844 for (uint32_t i = 0; i < cXmmRegs; i++)
8845 pDst->aXMM[i] = pSrc->aXMM[i];
8846 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8847 * right? */
8848 }
8849
8850 /* Commit the x87 state bits. (probably wrong) */
8851 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8852 if (rcStrict != VINF_SUCCESS)
8853 return rcStrict;
8854
8855 /*
8856 * Store AVX state.
8857 */
8858 if (fReqComponents & XSAVE_C_YMM)
8859 {
8860 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8861 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8862 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8863 PX86XSAVEYMMHI pCompDst;
8864 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8865 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
8866 if (rcStrict != VINF_SUCCESS)
8867 return rcStrict;
8868
8869 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8870 for (uint32_t i = 0; i < cXmmRegs; i++)
8871 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8872
8873 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8874 if (rcStrict != VINF_SUCCESS)
8875 return rcStrict;
8876 }
8877
8878 /*
8879 * Update the header.
8880 */
8881 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8882 | (fReqComponents & fXInUse);
8883
8884 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
8885 if (rcStrict != VINF_SUCCESS)
8886 return rcStrict;
8887
8888 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8889}
8890
8891
8892/**
8893 * Implements 'XRSTOR'.
8894 *
8895 * @param iEffSeg The effective segment.
8896 * @param GCPtrEff The address of the image.
8897 * @param enmEffOpSize The operand size (only REX.W really matters).
8898 */
8899IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8900{
8901 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8902
8903 /*
8904 * Raise exceptions.
8905 */
8906 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8907 return iemRaiseUndefinedOpcode(pVCpu);
8908 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8909 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8910 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8911 {
8912 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
8913 return iemRaiseUndefinedOpcode(pVCpu);
8914 }
8915 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8916 return iemRaiseDeviceNotAvailable(pVCpu);
8917 if (GCPtrEff & 63)
8918 {
8919        /** @todo CPU/VM detection possible! \#AC might not be signalled for
8920         * all/any misalignment sizes; Intel says it's an implementation detail. */
8921 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8922 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8923 && pVCpu->iem.s.uCpl == 3)
8924 return iemRaiseAlignmentCheckException(pVCpu);
8925 return iemRaiseGeneralProtectionFault0(pVCpu);
8926 }
8927
8928/** @todo figure out the exact protocol for the memory access. Currently we
8929 * just need this crap to work halfway to make it possible to test
8930 * AVX instructions. */
8931/** @todo figure out the XINUSE and XMODIFIED */
8932
8933 /*
8934 * Access the x87 memory state.
8935 */
8936 /* The x87+SSE state. */
8937 void *pvMem512;
8938 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8939 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8940 if (rcStrict != VINF_SUCCESS)
8941 return rcStrict;
8942 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8943 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8944
8945 /*
8946 * Calc the requested mask
8947 */
8948 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
8949 PCX86XSAVEHDR pHdrSrc;
8950    rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512,
8951 IEM_ACCESS_DATA_R, 0 /* checked above */);
8952 if (rcStrict != VINF_SUCCESS)
8953 return rcStrict;
8954
8955 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8956 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8957 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8958 uint64_t const fRstorMask = pHdrSrc->bmXState;
8959 uint64_t const fCompMask = pHdrSrc->bmXComp;
8960
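    /* A set bit 63 in XCOMP_BV would indicate the compacted XSAVE format,
       which is not implemented here. */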
8961 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8962
8963 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8964
8965 /* We won't need this any longer. */
8966 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
8967 if (rcStrict != VINF_SUCCESS)
8968 return rcStrict;
8969
8970 /*
8971 * Load the X87 state.
8972 */
8973 if (fReqComponents & XSAVE_C_X87)
8974 {
8975 if (fRstorMask & XSAVE_C_X87)
8976 {
8977 pDst->FCW = pSrc->FCW;
8978 pDst->FSW = pSrc->FSW;
8979 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8980 pDst->FOP = pSrc->FOP;
8981 pDst->FPUIP = pSrc->FPUIP;
8982 pDst->CS = pSrc->CS;
8983 pDst->FPUDP = pSrc->FPUDP;
8984 pDst->DS = pSrc->DS;
8985 if (enmEffOpSize == IEMMODE_64BIT)
8986 {
8987 /* Load upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8988 pDst->Rsrvd1 = pSrc->Rsrvd1;
8989 pDst->Rsrvd2 = pSrc->Rsrvd2;
8990 }
8991 else
8992 {
8993 pDst->Rsrvd1 = 0;
8994 pDst->Rsrvd2 = 0;
8995 }
8996 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8997 {
8998 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8999 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
9000 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
9001 pDst->aRegs[i].au32[3] = 0;
9002 }
9003
9004 pDst->FCW &= ~X86_FCW_ZERO_MASK;
9005 iemFpuRecalcExceptionStatus(pDst);
9006
9007 if (pDst->FSW & X86_FSW_ES)
9008 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
9009 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
9010 }
9011 else
9012 {
9013 pDst->FCW = 0x37f;
9014 pDst->FSW = 0;
9015 pDst->FTW = 0x00; /* 0 - empty. */
9016 pDst->FPUDP = 0;
9017 pDst->DS = 0; //??
9018 pDst->Rsrvd2= 0;
9019 pDst->FPUIP = 0;
9020 pDst->CS = 0; //??
9021 pDst->Rsrvd1= 0;
9022 pDst->FOP = 0;
9023 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
9024 {
9025 pDst->aRegs[i].au32[0] = 0;
9026 pDst->aRegs[i].au32[1] = 0;
9027 pDst->aRegs[i].au32[2] = 0;
9028 pDst->aRegs[i].au32[3] = 0;
9029 }
9030 }
9031 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
9032 }
9033
9034 /* MXCSR */
9035 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
9036 {
9037 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
9038 pDst->MXCSR = pSrc->MXCSR;
9039 else
9040 pDst->MXCSR = 0x1f80;
9041 }
9042
9043 /* XMM registers. */
9044 if (fReqComponents & XSAVE_C_SSE)
9045 {
9046 if (fRstorMask & XSAVE_C_SSE)
9047 {
9048 for (uint32_t i = 0; i < cXmmRegs; i++)
9049 pDst->aXMM[i] = pSrc->aXMM[i];
9050 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
9051 * right? */
9052 }
9053 else
9054 {
9055 for (uint32_t i = 0; i < cXmmRegs; i++)
9056 {
9057 pDst->aXMM[i].au64[0] = 0;
9058 pDst->aXMM[i].au64[1] = 0;
9059 }
9060 }
9061 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
9062 }
9063
9064    /* Unmap the x87 state bits (so we don't run out of mappings). */
9065 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
9066 if (rcStrict != VINF_SUCCESS)
9067 return rcStrict;
9068
9069 /*
9070 * Restore AVX state.
9071 */
9072 if (fReqComponents & XSAVE_C_YMM)
9073 {
9074 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
9075 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
9076
9077 if (fRstorMask & XSAVE_C_YMM)
9078 {
9079 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
9080 PCX86XSAVEYMMHI pCompSrc;
9081 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
9082 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
9083 IEM_ACCESS_DATA_R, 0 /* checked above */);
9084 if (rcStrict != VINF_SUCCESS)
9085 return rcStrict;
9086
9087 for (uint32_t i = 0; i < cXmmRegs; i++)
9088 {
9089 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
9090 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
9091 }
9092
9093 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
9094 if (rcStrict != VINF_SUCCESS)
9095 return rcStrict;
9096 }
9097 else
9098 {
9099 for (uint32_t i = 0; i < cXmmRegs; i++)
9100 {
9101 pCompDst->aYmmHi[i].au64[0] = 0;
9102 pCompDst->aYmmHi[i].au64[1] = 0;
9103 }
9104 }
9105 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
9106 }
9107
9108 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9109}
9110
9111
9112
9113
9114/**
9115 * Implements 'STMXCSR'.
9116 *
9117 * @param iEffSeg The effective segment register for @a GCPtrEff.
9118 * @param GCPtrEff The address of the image.
9119 */
9120IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9121{
9122 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9123
9124 /*
9125 * Raise exceptions.
9126 */
9127 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9128 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9129 {
9130 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9131 {
9132 /*
9133 * Do the job.
9134 */
9135 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9136 if (rcStrict == VINF_SUCCESS)
9137 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9138 return rcStrict;
9139 }
9140 return iemRaiseDeviceNotAvailable(pVCpu);
9141 }
9142 return iemRaiseUndefinedOpcode(pVCpu);
9143}
9144
9145
9146/**
9147 * Implements 'VSTMXCSR'.
9148 *
9149 * @param iEffSeg The effective segment register for @a GCPtrEff.
9150 * @param GCPtrEff The address of the image.
9151 */
9152IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9153{
9154 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9155
9156 /*
9157 * Raise exceptions.
9158 */
9159 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9160 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9161 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
9162 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9163 {
9164 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9165 {
9166 /*
9167 * Do the job.
9168 */
9169 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9170 if (rcStrict == VINF_SUCCESS)
9171 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9172 return rcStrict;
9173 }
9174 return iemRaiseDeviceNotAvailable(pVCpu);
9175 }
9176 return iemRaiseUndefinedOpcode(pVCpu);
9177}
9178
9179
9180/**
9181 * Implements 'LDMXCSR'.
9182 *
9183 * @param iEffSeg The effective segment register for @a GCPtrEff.
9184 * @param GCPtrEff The address of the image.
9185 */
9186IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9187{
9188 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9189
9190 /*
9191 * Raise exceptions.
9192 */
9193 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
9194 * happen after or before \#UD and \#EM? */
9195 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9196 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9197 {
9198 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9199 {
9200 /*
9201 * Do the job.
9202 */
9203 uint32_t fNewMxCsr;
9204 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9205 if (rcStrict == VINF_SUCCESS)
9206 {
9207 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9208 if (!(fNewMxCsr & ~fMxCsrMask))
9209 {
9210 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9211 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9212 }
9213 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9214 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9215 return iemRaiseGeneralProtectionFault0(pVCpu);
9216 }
9217 return rcStrict;
9218 }
9219 return iemRaiseDeviceNotAvailable(pVCpu);
9220 }
9221 return iemRaiseUndefinedOpcode(pVCpu);
9222}
9223
9224
9225/**
9226 * Common routine for fnstenv and fnsave.
9227 *
9228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9229 * @param enmEffOpSize The effective operand size.
9230 * @param uPtr Where to store the state.
9231 */
9232static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9233{
9234 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9235 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
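    /* The 16-bit environment image is 14 bytes (seven words), the 32-bit one
       28 bytes (seven dwords); in real/V86 mode the instruction/data pointers
       are stored as split linear-style addresses rather than selector:offset
       pairs. */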
9236 if (enmEffOpSize == IEMMODE_16BIT)
9237 {
9238 uPtr.pu16[0] = pSrcX87->FCW;
9239 uPtr.pu16[1] = pSrcX87->FSW;
9240 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9241 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9242 {
9243 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9244 * protected mode or long mode and we save it in real mode? And vice
9245 * versa? And with 32-bit operand size? I think CPU is storing the
9246 * effective address ((CS << 4) + IP) in the offset register and not
9247 * doing any address calculations here. */
9248 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9249 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9250 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9251 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9252 }
9253 else
9254 {
9255 uPtr.pu16[3] = pSrcX87->FPUIP;
9256 uPtr.pu16[4] = pSrcX87->CS;
9257 uPtr.pu16[5] = pSrcX87->FPUDP;
9258 uPtr.pu16[6] = pSrcX87->DS;
9259 }
9260 }
9261 else
9262 {
9263 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9264 uPtr.pu16[0*2] = pSrcX87->FCW;
9265 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9266 uPtr.pu16[1*2] = pSrcX87->FSW;
9267 uPtr.pu16[1*2+1] = 0xffff;
9268 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9269 uPtr.pu16[2*2+1] = 0xffff;
9270 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9271 {
9272 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9273 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9274 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9275 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9276 }
9277 else
9278 {
9279 uPtr.pu32[3] = pSrcX87->FPUIP;
9280 uPtr.pu16[4*2] = pSrcX87->CS;
9281 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9282 uPtr.pu32[5] = pSrcX87->FPUDP;
9283 uPtr.pu16[6*2] = pSrcX87->DS;
9284 uPtr.pu16[6*2+1] = 0xffff;
9285 }
9286 }
9287}
9288
9289
9290/**
9291 * Common routine for fldenv and frstor.
9292 *
9293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9294 * @param enmEffOpSize The effective operand size.
9295 * @param   uPtr                Where to load the state from.
9296 */
9297static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9298{
9299 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9300 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9301 if (enmEffOpSize == IEMMODE_16BIT)
9302 {
9303 pDstX87->FCW = uPtr.pu16[0];
9304 pDstX87->FSW = uPtr.pu16[1];
9305 pDstX87->FTW = uPtr.pu16[2];
9306 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9307 {
9308 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9309 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9310 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9311 pDstX87->CS = 0;
9312 pDstX87->Rsrvd1= 0;
9313 pDstX87->DS = 0;
9314 pDstX87->Rsrvd2= 0;
9315 }
9316 else
9317 {
9318 pDstX87->FPUIP = uPtr.pu16[3];
9319 pDstX87->CS = uPtr.pu16[4];
9320 pDstX87->Rsrvd1= 0;
9321 pDstX87->FPUDP = uPtr.pu16[5];
9322 pDstX87->DS = uPtr.pu16[6];
9323 pDstX87->Rsrvd2= 0;
9324 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9325 }
9326 }
9327 else
9328 {
9329 pDstX87->FCW = uPtr.pu16[0*2];
9330 pDstX87->FSW = uPtr.pu16[1*2];
9331 pDstX87->FTW = uPtr.pu16[2*2];
9332 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9333 {
9334 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9335 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9336 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9337 pDstX87->CS = 0;
9338 pDstX87->Rsrvd1= 0;
9339 pDstX87->DS = 0;
9340 pDstX87->Rsrvd2= 0;
9341 }
9342 else
9343 {
9344 pDstX87->FPUIP = uPtr.pu32[3];
9345 pDstX87->CS = uPtr.pu16[4*2];
9346 pDstX87->Rsrvd1= 0;
9347 pDstX87->FOP = uPtr.pu16[4*2+1];
9348 pDstX87->FPUDP = uPtr.pu32[5];
9349 pDstX87->DS = uPtr.pu16[6*2];
9350 pDstX87->Rsrvd2= 0;
9351 }
9352 }
9353
9354 /* Make adjustments. */
9355 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9356#ifdef LOG_ENABLED
9357 uint16_t const fOldFsw = pDstX87->FSW;
9358#endif
9359 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
9360 iemFpuRecalcExceptionStatus(pDstX87);
9361#ifdef LOG_ENABLED
9362 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9363 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9364                pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised",
9365 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9366#endif
9367
9368 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9369 * exceptions are pending after loading the saved state? */
9370}
9371
9372
9373/**
9374 * Implements 'FNSTENV'.
9375 *
9376 * @param enmEffOpSize The operand size (only REX.W really matters).
9377 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9378 * @param GCPtrEffDst The address of the image.
9379 */
9380IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9381{
9382 RTPTRUNION uPtr;
9383 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9384 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9385 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9386 if (rcStrict != VINF_SUCCESS)
9387 return rcStrict;
9388
9389 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9390
9391 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9392 if (rcStrict != VINF_SUCCESS)
9393 return rcStrict;
9394
9395 /* Mask all math exceptions. Any possibly pending exceptions will be cleared. */
9396 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9397 pFpuCtx->FCW |= X86_FCW_XCPT_MASK;
9398#ifdef LOG_ENABLED
9399 uint16_t fOldFsw = pFpuCtx->FSW;
9400#endif
9401 iemFpuRecalcExceptionStatus(pFpuCtx);
9402#ifdef LOG_ENABLED
9403 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9404 Log11(("fnstenv: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9405             fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9406#endif
9407
9408 iemHlpUsedFpu(pVCpu);
9409
9410 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9411 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9412}
9413
9414
9415/**
9416 * Implements 'FNSAVE'.
9417 *
9418 * @param enmEffOpSize The operand size.
9419 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9420 * @param GCPtrEffDst The address of the image.
9421 */
9422IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9423{
9424 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9425
9426 RTPTRUNION uPtr;
9427 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9428 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9429 if (rcStrict != VINF_SUCCESS)
9430 return rcStrict;
9431
9432 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9433 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9434 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
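    /* The eight data registers follow the environment block as packed 10-byte
       (80-bit) values, giving the 94/108 byte image sizes mapped above. */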
9435 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9436 {
9437 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9438 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9439 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9440 }
9441
9442 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9443 if (rcStrict != VINF_SUCCESS)
9444 return rcStrict;
9445
9446 /* Rotate the stack to account for changed TOS. */
9447 iemFpuRotateStackSetTop(pFpuCtx, 0);
9448
9449 /*
9450 * Re-initialize the FPU context.
9451 */
9452 pFpuCtx->FCW = 0x37f;
9453 pFpuCtx->FSW = 0;
9454 pFpuCtx->FTW = 0x00; /* 0 - empty */
9455 pFpuCtx->FPUDP = 0;
9456 pFpuCtx->DS = 0;
9457 pFpuCtx->Rsrvd2= 0;
9458 pFpuCtx->FPUIP = 0;
9459 pFpuCtx->CS = 0;
9460 pFpuCtx->Rsrvd1= 0;
9461 pFpuCtx->FOP = 0;
9462
9463 iemHlpUsedFpu(pVCpu);
9464 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9465}
9466
9467
9468
9469/**
9470 * Implements 'FLDENV'.
9471 *
9472 * @param enmEffOpSize The operand size (only REX.W really matters).
9473 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9474 * @param GCPtrEffSrc The address of the image.
9475 */
9476IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9477{
9478 RTCPTRUNION uPtr;
9479 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9480 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9481 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9482 if (rcStrict != VINF_SUCCESS)
9483 return rcStrict;
9484
9485 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9486
9487 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9488 if (rcStrict != VINF_SUCCESS)
9489 return rcStrict;
9490
9491 iemHlpUsedFpu(pVCpu);
9492 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9493}
9494
9495
9496/**
9497 * Implements 'FRSTOR'.
9498 *
9499 * @param enmEffOpSize The operand size.
9500 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9501 * @param GCPtrEffSrc The address of the image.
9502 */
9503IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9504{
9505 RTCPTRUNION uPtr;
9506 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9507 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9508 if (rcStrict != VINF_SUCCESS)
9509 return rcStrict;
9510
9511 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9512 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9513 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9514 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9515 {
9516 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9517 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9518 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9519 pFpuCtx->aRegs[i].au32[3] = 0;
9520 }
9521
9522 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9523 if (rcStrict != VINF_SUCCESS)
9524 return rcStrict;
9525
9526 iemHlpUsedFpu(pVCpu);
9527 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9528}
9529
9530
9531/**
9532 * Implements 'FLDCW'.
9533 *
9534 * @param u16Fcw The new FCW.
9535 */
9536IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9537{
9538 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9539
9540 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9541 /** @todo Testcase: Try see what happens when trying to set undefined bits
9542 * (other than 6 and 7). Currently ignoring them. */
9543    /** @todo Testcase: Test that it raises and lowers the FPU exception bits
9544 * according to FSW. (This is what is currently implemented.) */
9545 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9546 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
9547#ifdef LOG_ENABLED
9548 uint16_t fOldFsw = pFpuCtx->FSW;
9549#endif
9550 iemFpuRecalcExceptionStatus(pFpuCtx);
9551#ifdef LOG_ENABLED
9552 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9553 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9554             fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9555#endif
9556
9557 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9558 iemHlpUsedFpu(pVCpu);
9559 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9560}
9561
9562
9563
9564/**
9565 * Implements the underflow case of fxch.
9566 *
9567 * @param iStReg The other stack register.
9568 */
9569IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
9570{
9571 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9572
9573 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9574 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9575 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9576 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9577
9578 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
9579 * registers are read as QNaN and then exchanged. This could be
9580 * wrong... */
9581 if (pFpuCtx->FCW & X86_FCW_IM)
9582 {
9583 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9584 {
9585 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9586 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9587 else
9588 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9589 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9590 }
9591 else
9592 {
9593 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9594 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9595 }
9596 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9597 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9598 }
9599 else
9600 {
9601 /* raise underflow exception, don't change anything. */
9602 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9603 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9604 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9605 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9606 }
9607
9608 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9609 iemHlpUsedFpu(pVCpu);
9610 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9611}
9612
9613
9614/**
9615 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9616 *
9617 * @param iStReg The other stack register.
9618 * @param pfnAImpl The assembly comparison implementation.
9619 * @param fPop Whether we should pop the stack when done or not.
9620 */
9621IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
9622{
9623 Assert(iStReg < 8);
9624 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9625
9626 /*
9627 * Raise exceptions.
9628 */
9629 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9630 return iemRaiseDeviceNotAvailable(pVCpu);
9631
9632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9633 uint16_t u16Fsw = pFpuCtx->FSW;
9634 if (u16Fsw & X86_FSW_ES)
9635 return iemRaiseMathFault(pVCpu);
9636
9637 /*
9638 * Check if any of the register accesses causes #SF + #IA.
9639 */
9640 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9641 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
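    /* The comparison is only done when both ST(0) and ST(iStReg) are marked in
       use in FTW; an empty register means stack underflow (IE + SF). */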
9642 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9643 {
9644 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9645
9646 pFpuCtx->FSW &= ~X86_FSW_C1;
9647 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9648 if ( !(u16Fsw & X86_FSW_IE)
9649 || (pFpuCtx->FCW & X86_FCW_IM) )
9650 {
9651 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9652 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9653 }
9654 }
9655 else if (pFpuCtx->FCW & X86_FCW_IM)
9656 {
9657 /* Masked underflow. */
9658 pFpuCtx->FSW &= ~X86_FSW_C1;
9659 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9660 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9661 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9662 }
9663 else
9664 {
9665 /* Raise underflow - don't touch EFLAGS or TOP. */
9666 pFpuCtx->FSW &= ~X86_FSW_C1;
9667 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9668        Log11(("fcomi: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9669 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9670 fPop = false;
9671 }
9672
9673 /*
9674 * Pop if necessary.
9675 */
9676 if (fPop)
9677 {
9678 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9679 iemFpuStackIncTop(pVCpu);
9680 }
9681
9682 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9683 iemHlpUsedFpu(pVCpu);
9684 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9685}
9686
9687/** @} */
9688