VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp@97503

Last change on this file since 97503 was 97503, checked in by vboxsync, 2 years ago

VMM/IEM: Single stepping far calls. Added a bunch of single step todos. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 350.2 KB
1/* $Id: IEMAllCImpl.cpp 97503 2022-11-11 01:33:27Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/apic.h>
37#include <VBox/vmm/pdm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/iom.h>
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/vmm/nem.h>
43#include <VBox/vmm/gim.h>
44#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
45# include <VBox/vmm/em.h>
46# include <VBox/vmm/hm_svm.h>
47#endif
48#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
49# include <VBox/vmm/hmvmxinline.h>
50#endif
51#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
52# include <VBox/vmm/cpuidcall.h>
53#endif
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/dbgf.h>
56#include <VBox/vmm/dbgftrace.h>
57#include "IEMInternal.h"
58#include <VBox/vmm/vmcc.h>
59#include <VBox/log.h>
60#include <VBox/err.h>
61#include <VBox/param.h>
62#include <VBox/dis.h>
63#include <VBox/disopcode.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/**
76 * Flushes the prefetch buffer, light version.
77 */
78#ifndef IEM_WITH_CODE_TLB
79# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { (a_pVCpu)->iem.s.cbOpcode = (a_cbInstr); } while (0)
80#else
81# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { } while (0)
82#endif
83
84/**
85 * Flushes the prefetch buffer, heavy version.
86 */
87#ifndef IEM_WITH_CODE_TLB
88# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { (a_pVCpu)->iem.s.cbOpcode = (a_cbInstr); } while (0)
89#else
90# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { (a_pVCpu)->iem.s.pbInstrBuf = NULL; } while (0)
91#endif
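/* Note: without IEM_WITH_CODE_TLB both variants simply truncate the opcode
   buffer to the current instruction; with the code TLB enabled the light
   variant is a no-op while the heavy one invalidates pbInstrBuf so that the
   next opcode fetch re-establishes the instruction buffer. */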
92
93
94
95/** @name Misc Helpers
96 * @{
97 */
98
99
100/**
101 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
102 *
103 * @returns Strict VBox status code.
104 *
105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
106 * @param u16Port The port number.
107 * @param cbOperand The operand size.
108 */
109static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
110{
111 /* The TSS bits we're interested in are the same on 386 and AMD64. */
112 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
113 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
114 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
115 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
116
117 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
118
119 /*
120 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
121 */
122 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
123 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
124 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
125 {
126 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
127 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
128 return iemRaiseGeneralProtectionFault0(pVCpu);
129 }
130
131 /*
132 * Read the bitmap offset (may #PF).
133 */
134 uint16_t offBitmap;
135 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
136 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
137 if (rcStrict != VINF_SUCCESS)
138 {
139 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
140 return rcStrict;
141 }
142
143 /*
144 * We need the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
145 * describes the CPU as actually reading two bytes regardless of whether the
146 * bit range crosses a byte boundary. Thus the + 1 in the test below.
147 */
148 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
149 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
150 * for instance, sizeof(X86TSS32). */
151 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
152 {
153 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
154 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
155 return iemRaiseGeneralProtectionFault0(pVCpu);
156 }
157
158 /*
159 * Read the necessary bits.
160 */
161 /** @todo Test the assertion in the Intel manual that the CPU reads two
162 * bytes. The question is how this works wrt \#PF and \#GP on the
163 * 2nd byte when it's not required. */
164 uint16_t bmBytes = UINT16_MAX;
165 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
166 if (rcStrict != VINF_SUCCESS)
167 {
168 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
169 return rcStrict;
170 }
171
172 /*
173 * Perform the check.
174 */
175 uint16_t fPortMask = (1 << cbOperand) - 1;
176 bmBytes >>= (u16Port & 7);
177 if (bmBytes & fPortMask)
178 {
179 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
180 u16Port, cbOperand, bmBytes, fPortMask));
181 return iemRaiseGeneralProtectionFault0(pVCpu);
182 }
183
184 return VINF_SUCCESS;
185}
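/* Worked example of the check above (illustrative values only): an OUT to
   port 0x3fb with cbOperand=2 and offIoBitmap=0x68 gives
       offFirstBit = 0x3fb / 8 + 0x68 = 0x7f + 0x68 = 0xe7,
   so a 16-bit word is read at TR.base + 0xe7, shifted right by
   (0x3fb & 7) = 3, and the access is granted only if the low
   fPortMask = (1 << 2) - 1 = 0x3 bits are all clear. */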
186
187
188/**
189 * Checks if we are allowed to access the given I/O port, raising the
190 * appropriate exceptions if we aren't (or if the I/O bitmap is not
191 * accessible).
192 *
193 * @returns Strict VBox status code.
194 *
195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
196 * @param u16Port The port number.
197 * @param cbOperand The operand size.
198 */
199DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
200{
201 X86EFLAGS Efl;
202 Efl.u = IEMMISC_GET_EFL(pVCpu);
203 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
204 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL
205 || Efl.Bits.u1VM) )
206 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
207 return VINF_SUCCESS;
208}
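/* Rough usage sketch (assumed caller pattern, e.g. the IN/OUT helpers further
   down in this file); the strict status must be propagated as-is since it may
   carry an exception or an informational status rather than plain success:
       VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
   (cbReg here stands for the access size in bytes.) */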
209
210
211#if 0
212/**
213 * Calculates the parity bit.
214 *
215 * @returns true if the bit is set, false if not.
216 * @param u8Result The least significant byte of the result.
217 */
218static bool iemHlpCalcParityFlag(uint8_t u8Result)
219{
220 /*
221 * Parity is set if the number of bits in the least significant byte of
222 * the result is even.
223 */
224 uint8_t cBits;
225 cBits = u8Result & 1; /* 0 */
226 u8Result >>= 1;
227 cBits += u8Result & 1;
228 u8Result >>= 1;
229 cBits += u8Result & 1;
230 u8Result >>= 1;
231 cBits += u8Result & 1;
232 u8Result >>= 1;
233 cBits += u8Result & 1; /* 4 */
234 u8Result >>= 1;
235 cBits += u8Result & 1;
236 u8Result >>= 1;
237 cBits += u8Result & 1;
238 u8Result >>= 1;
239 cBits += u8Result & 1;
240 return !(cBits & 1);
241}
242#endif /* not used */
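/* A compact equivalent of the bit-counting routine above (likewise just a
   sketch, not used by the code): an xor fold collapses the byte's parity into
   bit 0, so PF-set (an even number of set bits) becomes a single test:
       static bool iemHlpCalcParityFlagXorFold(uint8_t u8Result)
       {
           u8Result ^= u8Result >> 4;
           u8Result ^= u8Result >> 2;
           u8Result ^= u8Result >> 1;
           return !(u8Result & 1);
       }
 */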
243
244
245/**
246 * Updates the specified flags according to an 8-bit result.
247 *
248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
249 * @param u8Result The result to set the flags according to.
250 * @param fToUpdate The flags to update.
251 * @param fUndefined The flags that are specified as undefined.
252 */
253static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
254{
255 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
256 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
257 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
258 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
259}
260
261
262/**
263 * Updates the specified flags according to a 16-bit result.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
266 * @param u16Result The result to set the flags according to.
267 * @param fToUpdate The flags to update.
268 * @param fUndefined The flags that are specified as undefined.
269 */
270static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
271{
272 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
273 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags);
274 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
275 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
276}
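/* Both helpers above use the same splice pattern: a fresh EFLAGS value is
   computed with the TEST worker and only the bits in fMask = fToUpdate | fUndefined
   are copied into the guest EFLAGS, i.e.
       eflags = (eflags & ~fMask) | (fFreshEFlags & fMask);
   so flags documented as undefined simply take whatever the TEST worker produced. */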
277
278
279/**
280 * Helper used by iret.
281 *
282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
283 * @param uCpl The new CPL.
284 * @param pSReg Pointer to the segment register.
285 */
286static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
287{
288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
289 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
290
291 if ( uCpl > pSReg->Attr.n.u2Dpl
292 && pSReg->Attr.n.u1DescType /* code or data, not system */
293 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
294 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
295 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
296}
297
298
299/**
300 * Indicates that we have modified the FPU state.
301 *
302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
303 */
304DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
305{
306 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
307}
308
309/** @} */
310
311/** @name C Implementations
312 * @{
313 */
314
315/**
316 * Implements a 16-bit popa.
317 */
318IEM_CIMPL_DEF_0(iemCImpl_popa_16)
319{
320 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
321 RTGCPTR GCPtrLast = GCPtrStart + 15;
322 VBOXSTRICTRC rcStrict;
323
324 /*
325 * The docs are a bit hard to comprehend here, but it looks like we wrap
326 * around in real mode as long as none of the individual "popa" crosses the
327 * end of the stack segment. In protected mode we check the whole access
328 * in one go. For efficiency, only do the word-by-word thing if we're in
329 * danger of wrapping around.
330 */
331 /** @todo do popa boundary / wrap-around checks. */
332 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
333 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
334 {
335 /* word-by-word */
336 RTUINT64U TmpRsp;
337 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
338 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
339 if (rcStrict == VINF_SUCCESS)
340 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
341 if (rcStrict == VINF_SUCCESS)
342 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
343 if (rcStrict == VINF_SUCCESS)
344 {
345 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
346 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
347 }
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 {
356 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
357 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
358 }
359 }
360 else
361 {
362 uint16_t const *pa16Mem = NULL;
363 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
364 if (rcStrict == VINF_SUCCESS)
365 {
366 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
367 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
368 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
369 /* skip sp */
370 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
371 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
372 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
373 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
374 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
375 if (rcStrict == VINF_SUCCESS)
376 {
377 iemRegAddToRsp(pVCpu, 16);
378 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
379 }
380 }
381 }
382 return rcStrict;
383}
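/* Note on the indexing above and in the other pusha/popa variants below: the
   PUSHA image is laid out in memory from the lowest address up as
   DI, SI, BP, SP, BX, DX, CX, AX, and since X86_GREG_xAX is 0 while
   X86_GREG_xDI is 7, the slot for a given register ends up at
   pa16Mem[7 - X86_GREG_xXX] (pa32Mem for the 32-bit variants). */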
384
385
386/**
387 * Implements a 32-bit popa.
388 */
389IEM_CIMPL_DEF_0(iemCImpl_popa_32)
390{
391 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
392 RTGCPTR GCPtrLast = GCPtrStart + 31;
393 VBOXSTRICTRC rcStrict;
394
395 /*
396 * The docs are a bit hard to comprehend here, but it looks like we wrap
397 * around in real mode as long as none of the individual "popa" crosses the
398 * end of the stack segment. In protected mode we check the whole access
399 * in one go. For efficiency, only do the word-by-word thing if we're in
400 * danger of wrapping around.
401 */
402 /** @todo do popa boundary / wrap-around checks. */
403 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
404 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
405 {
406 /* word-by-word */
407 RTUINT64U TmpRsp;
408 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
409 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
410 if (rcStrict == VINF_SUCCESS)
411 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 {
416 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
417 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
418 }
419 if (rcStrict == VINF_SUCCESS)
420 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
421 if (rcStrict == VINF_SUCCESS)
422 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
423 if (rcStrict == VINF_SUCCESS)
424 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
425 if (rcStrict == VINF_SUCCESS)
426 {
427#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
428 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
429 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
430 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
431 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
432 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
433 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
434 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
435#endif
436 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
437 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
438 }
439 }
440 else
441 {
442 uint32_t const *pa32Mem;
443 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
444 if (rcStrict == VINF_SUCCESS)
445 {
446 pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
447 pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
448 pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
449 /* skip esp */
450 pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
451 pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
452 pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
453 pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
454 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
455 if (rcStrict == VINF_SUCCESS)
456 {
457 iemRegAddToRsp(pVCpu, 32);
458 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
459 }
460 }
461 }
462 return rcStrict;
463}
464
465
466/**
467 * Implements a 16-bit pusha.
468 */
469IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
470{
471 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
472 RTGCPTR GCPtrBottom = GCPtrTop - 15;
473 VBOXSTRICTRC rcStrict;
474
475 /*
476 * The docs are a bit hard to comprehend here, but it looks like we wrap
477 * around in real mode as long as none of the individual pushes crosses the
478 * end of the stack segment. In protected mode we check the whole access
479 * in one go. For efficiency, only do the word-by-word thing if we're in
480 * danger of wrapping around.
481 */
482 /** @todo do pusha boundary / wrap-around checks. */
483 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
484 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
485 {
486 /* word-by-word */
487 RTUINT64U TmpRsp;
488 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
489 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
490 if (rcStrict == VINF_SUCCESS)
491 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
492 if (rcStrict == VINF_SUCCESS)
493 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
494 if (rcStrict == VINF_SUCCESS)
495 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
496 if (rcStrict == VINF_SUCCESS)
497 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
498 if (rcStrict == VINF_SUCCESS)
499 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
500 if (rcStrict == VINF_SUCCESS)
501 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
502 if (rcStrict == VINF_SUCCESS)
503 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
504 if (rcStrict == VINF_SUCCESS)
505 {
506 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
507 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
508 }
509 }
510 else
511 {
512 GCPtrBottom--;
513 uint16_t *pa16Mem = NULL;
514 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
515 if (rcStrict == VINF_SUCCESS)
516 {
517 pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
518 pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
519 pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
520 pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
521 pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
522 pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
523 pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
524 pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
525 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
526 if (rcStrict == VINF_SUCCESS)
527 {
528 iemRegSubFromRsp(pVCpu, 16);
529 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
530 }
531 }
532 }
533 return rcStrict;
534}
535
536
537/**
538 * Implements a 32-bit pusha.
539 */
540IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
541{
542 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
543 RTGCPTR GCPtrBottom = GCPtrTop - 31;
544 VBOXSTRICTRC rcStrict;
545
546 /*
547 * The docs are a bit hard to comprehend here, but it looks like we wrap
548 * around in real mode as long as none of the individual "pusha" crosses the
549 * end of the stack segment. In protected mode we check the whole access
550 * in one go. For efficiency, only do the word-by-word thing if we're in
551 * danger of wrapping around.
552 */
553 /** @todo do pusha boundary / wrap-around checks. */
554 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
555 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
556 {
557 /* word-by-word */
558 RTUINT64U TmpRsp;
559 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
560 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
561 if (rcStrict == VINF_SUCCESS)
562 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
563 if (rcStrict == VINF_SUCCESS)
564 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
565 if (rcStrict == VINF_SUCCESS)
566 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
567 if (rcStrict == VINF_SUCCESS)
568 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
569 if (rcStrict == VINF_SUCCESS)
570 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
571 if (rcStrict == VINF_SUCCESS)
572 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
573 if (rcStrict == VINF_SUCCESS)
574 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
575 if (rcStrict == VINF_SUCCESS)
576 {
577 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
578 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
579 }
580 }
581 else
582 {
583 GCPtrBottom--;
584 uint32_t *pa32Mem;
585 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
586 if (rcStrict == VINF_SUCCESS)
587 {
588 pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
589 pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
590 pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
591 pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
592 pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
593 pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
594 pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
595 pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
596 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
597 if (rcStrict == VINF_SUCCESS)
598 {
599 iemRegSubFromRsp(pVCpu, 32);
600 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
601 }
602 }
603 }
604 return rcStrict;
605}
606
607
608/**
609 * Implements pushf.
610 *
611 *
612 * @param enmEffOpSize The effective operand size.
613 */
614IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
615{
616 VBOXSTRICTRC rcStrict;
617
618 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
619 {
620 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
621 IEM_SVM_UPDATE_NRIP(pVCpu);
622 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
623 }
624
625 /*
626 * If we're in V8086 mode some care is required (which is why we're
627 * doing this in a C implementation).
628 */
629 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
630 if ( (fEfl & X86_EFL_VM)
631 && X86_EFL_GET_IOPL(fEfl) != 3 )
632 {
633 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
634 if ( enmEffOpSize != IEMMODE_16BIT
635 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
636 return iemRaiseGeneralProtectionFault0(pVCpu);
637 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
638 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
639 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
640 }
641 else
642 {
643
644 /*
645 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
646 */
647 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
648
649 switch (enmEffOpSize)
650 {
651 case IEMMODE_16BIT:
652 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
653 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
654 fEfl |= UINT16_C(0xf000);
655 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
656 break;
657 case IEMMODE_32BIT:
658 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
659 break;
660 case IEMMODE_64BIT:
661 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
662 break;
663 IEM_NOT_REACHED_DEFAULT_CASE_RET();
664 }
665 }
666
667 if (rcStrict == VINF_SUCCESS)
668 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
669 return rcStrict;
670}
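/* Worked example of the ancient-CPU adjustment above: on an 8086/80186/V20
   target a flags value of 0x0002 is pushed as 0xf002, matching those CPUs
   storing bits 15:12 as set; the popf implementation below documents the
   related 80286 NT/IOPL quirk. */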
671
672
673/**
674 * Implements popf.
675 *
676 * @param enmEffOpSize The effective operand size.
677 */
678IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
679{
680 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
681 VBOXSTRICTRC rcStrict;
682 uint32_t fEflNew;
683
684 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
685 {
686 Log2(("popf: Guest intercept -> #VMEXIT\n"));
687 IEM_SVM_UPDATE_NRIP(pVCpu);
688 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
689 }
690
691 /*
692 * V8086 is special as usual.
693 */
694 if (fEflOld & X86_EFL_VM)
695 {
696 /*
697 * Almost anything goes if IOPL is 3.
698 */
699 if (X86_EFL_GET_IOPL(fEflOld) == 3)
700 {
701 switch (enmEffOpSize)
702 {
703 case IEMMODE_16BIT:
704 {
705 uint16_t u16Value;
706 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
707 if (rcStrict != VINF_SUCCESS)
708 return rcStrict;
709 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
710 break;
711 }
712 case IEMMODE_32BIT:
713 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716 break;
717 IEM_NOT_REACHED_DEFAULT_CASE_RET();
718 }
719
720 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
721 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
722 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
723 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
724 }
725 /*
726 * Interrupt flag virtualization with CR4.VME=1.
727 */
728 else if ( enmEffOpSize == IEMMODE_16BIT
729 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
730 {
731 uint16_t u16Value;
732 RTUINT64U TmpRsp;
733 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
734 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
735 if (rcStrict != VINF_SUCCESS)
736 return rcStrict;
737
738 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP
739 * or before? */
740 if ( ( (u16Value & X86_EFL_IF)
741 && (fEflOld & X86_EFL_VIP))
742 || (u16Value & X86_EFL_TF) )
743 return iemRaiseGeneralProtectionFault0(pVCpu);
744
745 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
746 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
747 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
748 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
749
750 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
751 }
752 else
753 return iemRaiseGeneralProtectionFault0(pVCpu);
754
755 }
756 /*
757 * Not in V8086 mode.
758 */
759 else
760 {
761 /* Pop the flags. */
762 switch (enmEffOpSize)
763 {
764 case IEMMODE_16BIT:
765 {
766 uint16_t u16Value;
767 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
768 if (rcStrict != VINF_SUCCESS)
769 return rcStrict;
770 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
771
772 /*
773 * Ancient CPU adjustments:
774 * - 8086, 80186, V20/30:
775 * Fixed bits 15:12 are not kept correctly internally, mostly for
776 * practical reasons (masking below). We add them when pushing flags.
777 * - 80286:
778 * The NT and IOPL flags cannot be popped from real mode and are
779 * therefore always zero (since a 286 can never exit from PM and
780 * their initial value is zero). This changed on a 386 and can
781 * therefore be used to detect 286 or 386 CPU in real mode.
782 */
783 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
784 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
785 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
786 break;
787 }
788 case IEMMODE_32BIT:
789 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
790 if (rcStrict != VINF_SUCCESS)
791 return rcStrict;
792 break;
793 case IEMMODE_64BIT:
794 {
795 uint64_t u64Value;
796 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
797 if (rcStrict != VINF_SUCCESS)
798 return rcStrict;
799 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
800 break;
801 }
802 IEM_NOT_REACHED_DEFAULT_CASE_RET();
803 }
804
805 /* Merge them with the current flags. */
806 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
807 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
808 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
809 || pVCpu->iem.s.uCpl == 0)
810 {
811 fEflNew &= fPopfBits;
812 fEflNew |= ~fPopfBits & fEflOld;
813 }
814 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld))
815 {
816 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
817 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
818 }
819 else
820 {
821 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
822 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
823 }
824 }
825
826 /*
827 * Commit the flags.
828 */
829 Assert(fEflNew & RT_BIT_32(1));
830 IEMMISC_SET_EFL(pVCpu, fEflNew);
831 return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
832}
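/* Worked example of the merge rules above: at CPL 3 with IOPL 0 (protected
   mode, not V8086), a POPF that attempts to set IF or change IOPL takes the
   last branch, so IF and IOPL silently keep their old values; no fault is
   raised, the bits are simply left unchanged. */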
833
834
835/**
836 * Implements an indirect call.
837 *
838 * @param uNewPC The new program counter (RIP) value (loaded from the
839 * operand).
840 */
841IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
842{
843 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
844 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
845 {
846 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
847 if (rcStrict == VINF_SUCCESS)
848 {
849 pVCpu->cpum.GstCtx.rip = uNewPC;
850 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
851 return iemRegFinishClearingRF(pVCpu);
852 }
853 return rcStrict;
854 }
855 return iemRaiseGeneralProtectionFault0(pVCpu);
856}
857
858
859/**
860 * Implements a 16-bit relative call.
861 *
862 * @param offDisp The displacement offset.
863 */
864IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
865{
866 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
867 uint16_t const uNewPC = uOldPC + offDisp;
868 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
869 {
870 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);
871 if (rcStrict == VINF_SUCCESS)
872 {
873 pVCpu->cpum.GstCtx.rip = uNewPC;
874 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
875 return iemRegFinishClearingRF(pVCpu);
876 }
877 return rcStrict;
878 }
879 return iemRaiseGeneralProtectionFault0(pVCpu);
880}
881
882
883/**
884 * Implements a 32-bit indirect call.
885 *
886 * @param uNewPC The new program counter (RIP) value (loaded from the
887 * operand).
888 */
889IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
890{
891 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
892 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
893 {
894 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
895 if (rcStrict == VINF_SUCCESS)
896 {
897 pVCpu->cpum.GstCtx.rip = uNewPC;
898 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
899 return iemRegFinishClearingRF(pVCpu);
900 }
901 return rcStrict;
902 }
903 return iemRaiseGeneralProtectionFault0(pVCpu);
904}
905
906
907/**
908 * Implements a 32-bit relative call.
909 *
910 * @param offDisp The displacement offset.
911 */
912IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
913{
914 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;
915 uint32_t const uNewPC = uOldPC + offDisp;
916 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)
917 {
918 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);
919 if (rcStrict == VINF_SUCCESS)
920 {
921 pVCpu->cpum.GstCtx.rip = uNewPC;
922 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
923 return iemRegFinishClearingRF(pVCpu);
924 }
925 return rcStrict;
926 }
927 return iemRaiseGeneralProtectionFault0(pVCpu);
928}
929
930
931/**
932 * Implements a 64-bit indirect call.
933 *
934 * @param uNewPC The new program counter (RIP) value (loaded from the
935 * operand).
936 */
937IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
938{
939 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
940 if (IEM_IS_CANONICAL(uNewPC))
941 {
942 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
943 if (rcStrict == VINF_SUCCESS)
944 {
945 pVCpu->cpum.GstCtx.rip = uNewPC;
946 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
947 return iemRegFinishClearingRF(pVCpu);
948 }
949 return rcStrict;
950 }
951 return iemRaiseGeneralProtectionFault0(pVCpu);
952}
953
954
955/**
956 * Implements a 64-bit relative call.
957 *
958 * @param offDisp The displacement offset.
959 */
960IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
961{
962 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
963 uint64_t const uNewPC = uOldPC + offDisp;
964 if (IEM_IS_CANONICAL(uNewPC))
965 {
966 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);
967 if (rcStrict == VINF_SUCCESS)
968 {
969 pVCpu->cpum.GstCtx.rip = uNewPC;
970 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);
971 return iemRegFinishClearingRF(pVCpu);
972 }
973 return rcStrict;
974 }
975 return iemRaiseNotCanonical(pVCpu);
976}
977
978
979/**
980 * Implements far jumps and calls thru task segments (TSS).
981 *
982 * @returns VBox strict status code.
983 * @param pVCpu The cross context virtual CPU structure of the
984 * calling thread.
985 * @param cbInstr The current instruction length.
986 * @param uSel The selector.
987 * @param enmBranch The kind of branching we're performing.
988 * @param enmEffOpSize The effective operand size.
989 * @param pDesc The descriptor corresponding to @a uSel. The type is
990 * task gate.
991 */
992static VBOXSTRICTRC iemCImpl_BranchTaskSegment(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
993 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
994{
995#ifndef IEM_IMPLEMENTS_TASKSWITCH
996 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
997#else
998 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
999 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1000 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
1001 RT_NOREF_PV(enmEffOpSize);
1002 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1003
1004 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1005 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1006 {
1007 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1008 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1009 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1010 }
1011
1012 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1013 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1014 * checked here, need testcases. */
1015 if (!pDesc->Legacy.Gen.u1Present)
1016 {
1017 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
1018 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1019 }
1020
1021 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1022 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1023 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
1024#endif
1025}
1026
1027
1028/**
1029 * Implements far jumps and calls thru task gates.
1030 *
1031 * @returns VBox strict status code.
1032 * @param pVCpu The cross context virtual CPU structure of the
1033 * calling thread.
1034 * @param cbInstr The current instruction length.
1035 * @param uSel The selector.
1036 * @param enmBranch The kind of branching we're performing.
1037 * @param enmEffOpSize The effective operand size.
1038 * @param pDesc The descriptor corresponding to @a uSel. The type is
1039 * task gate.
1040 */
1041static VBOXSTRICTRC iemCImpl_BranchTaskGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1042 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1043{
1044#ifndef IEM_IMPLEMENTS_TASKSWITCH
1045 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1046#else
1047 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1048 RT_NOREF_PV(enmEffOpSize);
1049 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1050
1051 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1052 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1053 {
1054 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1055 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1056 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1057 }
1058
1059 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
1060 * far calls (see iemCImpl_callf). Most likely in both cases it should be
1061 * checked here, need testcases. */
1062 if (!pDesc->Legacy.Gen.u1Present)
1063 {
1064 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1065 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1066 }
1067
1068 /*
1069 * Fetch the new TSS descriptor from the GDT.
1070 */
1071 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1072 if (uSelTss & X86_SEL_LDT)
1073 {
1074 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1075 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1076 }
1077
1078 IEMSELDESC TssDesc;
1079 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1080 if (rcStrict != VINF_SUCCESS)
1081 return rcStrict;
1082
1083 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1084 {
1085 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1086 TssDesc.Legacy.Gate.u4Type));
1087 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1088 }
1089
1090 if (!TssDesc.Legacy.Gate.u1Present)
1091 {
1092 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1093 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1094 }
1095
1096 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1097 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1098 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1099#endif
1100}
1101
1102
1103/**
1104 * Implements far jumps and calls thru call gates.
1105 *
1106 * @returns VBox strict status code.
1107 * @param pVCpu The cross context virtual CPU structure of the
1108 * calling thread.
1109 * @param cbInstr The current instruction length.
1110 * @param uSel The selector.
1111 * @param enmBranch The kind of branching we're performing.
1112 * @param enmEffOpSize The effective operand size.
1113 * @param pDesc The descriptor corresponding to @a uSel. The type is
1114 * call gate.
1115 */
1116static VBOXSTRICTRC iemCImpl_BranchCallGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1117 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1118{
1119#define IEM_IMPLEMENTS_CALLGATE
1120#ifndef IEM_IMPLEMENTS_CALLGATE
1121 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1122#else
1123 RT_NOREF_PV(enmEffOpSize);
1124 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1125
1126 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1127 * inter-privilege calls and are much more complex.
1128 *
1129 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1130 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1131 * must be 16-bit or 32-bit.
1132 */
1133 /** @todo effective operand size is probably irrelevant here, only the
1134 * call gate bitness matters??
1135 */
1136 VBOXSTRICTRC rcStrict;
1137 RTPTRUNION uPtrRet;
1138 uint64_t uNewRsp;
1139 uint64_t uNewRip;
1140 uint64_t u64Base;
1141 uint32_t cbLimit;
1142 RTSEL uNewCS;
1143 IEMSELDESC DescCS;
1144
1145 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1146 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1147 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1148 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1149
1150 /* Determine the new instruction pointer from the gate descriptor. */
1151 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1152 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1153 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1154
1155 /* Perform DPL checks on the gate descriptor. */
1156 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
1157 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1158 {
1159 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1160 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL)));
1161 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1162 }
1163
1164 /** @todo does this catch NULL selectors, too? */
1165 if (!pDesc->Legacy.Gen.u1Present)
1166 {
1167 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1168 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1169 }
1170
1171 /*
1172 * Fetch the target CS descriptor from the GDT or LDT.
1173 */
1174 uNewCS = pDesc->Legacy.Gate.u16Sel;
1175 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1176 if (rcStrict != VINF_SUCCESS)
1177 return rcStrict;
1178
1179 /* Target CS must be a code selector. */
1180 if ( !DescCS.Legacy.Gen.u1DescType
1181 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1182 {
1183 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1184 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1185 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1186 }
1187
1188 /* Privilege checks on target CS. */
1189 if (enmBranch == IEMBRANCH_JUMP)
1190 {
1191 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1192 {
1193 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1194 {
1195 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1196 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1197 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1198 }
1199 }
1200 else
1201 {
1202 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
1203 {
1204 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1205 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1206 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1207 }
1208 }
1209 }
1210 else
1211 {
1212 Assert(enmBranch == IEMBRANCH_CALL);
1213 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
1214 {
1215 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1216 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1217 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1218 }
1219 }
1220
1221 /* Additional long mode checks. */
1222 if (IEM_IS_LONG_MODE(pVCpu))
1223 {
1224 if (!DescCS.Legacy.Gen.u1Long)
1225 {
1226 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1227 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1228 }
1229
1230 /* L vs D. */
1231 if ( DescCS.Legacy.Gen.u1Long
1232 && DescCS.Legacy.Gen.u1DefBig)
1233 {
1234 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1235 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1236 }
1237 }
1238
1239 if (!DescCS.Legacy.Gate.u1Present)
1240 {
1241 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1242 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1243 }
1244
1245 if (enmBranch == IEMBRANCH_JUMP)
1246 {
1247 /** @todo This is very similar to regular far jumps; merge! */
1248 /* Jumps are fairly simple... */
1249
1250 /* Chop the high bits off if 16-bit gate (Intel says so). */
1251 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1252 uNewRip = (uint16_t)uNewRip;
1253
1254 /* Limit check for non-long segments. */
1255 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1256 if (DescCS.Legacy.Gen.u1Long)
1257 u64Base = 0;
1258 else
1259 {
1260 if (uNewRip > cbLimit)
1261 {
1262 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1263 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1264 }
1265 u64Base = X86DESC_BASE(&DescCS.Legacy);
1266 }
1267
1268 /* Canonical address check. */
1269 if (!IEM_IS_CANONICAL(uNewRip))
1270 {
1271 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1272 return iemRaiseNotCanonical(pVCpu);
1273 }
1274
1275 /*
1276 * Ok, everything checked out fine. Now set the accessed bit before
1277 * committing the result into CS, CSHID and RIP.
1278 */
1279 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1280 {
1281 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1282 if (rcStrict != VINF_SUCCESS)
1283 return rcStrict;
1284 /** @todo check what VT-x and AMD-V does. */
1285 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1286 }
1287
1288 /* commit */
1289 pVCpu->cpum.GstCtx.rip = uNewRip;
1290 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1291 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1292 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1293 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1294 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1295 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1296 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1297 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1298 }
1299 else
1300 {
1301 Assert(enmBranch == IEMBRANCH_CALL);
1302 /* Calls are much more complicated. */
1303
1304 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl))
1305 {
1306 uint16_t offNewStack; /* Offset of new stack in TSS. */
1307 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1308 uint8_t uNewCSDpl;
1309 uint8_t cbWords;
1310 RTSEL uNewSS;
1311 RTSEL uOldSS;
1312 uint64_t uOldRsp;
1313 IEMSELDESC DescSS;
1314 RTPTRUNION uPtrTSS;
1315 RTGCPTR GCPtrTSS;
1316 RTPTRUNION uPtrParmWds;
1317 RTGCPTR GCPtrParmWds;
1318
1319 /* More privilege. This is the fun part. */
1320 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1321
1322 /*
1323 * Determine new SS:rSP from the TSS.
1324 */
1325 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1326
1327 /* Figure out where the new stack pointer is stored in the TSS. */
1328 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1329 if (!IEM_IS_LONG_MODE(pVCpu))
1330 {
1331 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1332 {
1333 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1334 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1335 }
1336 else
1337 {
1338 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1339 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1340 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1341 }
1342 }
1343 else
1344 {
1345 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1346 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1347 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1348 }
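 /* Worked example for the offsets above: a 32-bit TSS with a target CS.DPL of 1
    yields offNewStack = 4 + 1 * 8 = 0x0c, i.e. ESP1 with SS1 following at 0x10;
    a 64-bit TSS with the same DPL gives offNewStack = 4 + 8 = 0x0c, i.e. RSP1,
    and no SS selector is read from the TSS at all. */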
1349
1350 /* Check against TSS limit. */
1351 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1352 {
1353 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1354 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1355 }
1356
1357 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1358 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
1359 if (rcStrict != VINF_SUCCESS)
1360 {
1361 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1362 return rcStrict;
1363 }
1364
1365 if (!IEM_IS_LONG_MODE(pVCpu))
1366 {
1367 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1368 {
1369 uNewRsp = uPtrTSS.pu32[0];
1370 uNewSS = uPtrTSS.pu16[2];
1371 }
1372 else
1373 {
1374 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1375 uNewRsp = uPtrTSS.pu16[0];
1376 uNewSS = uPtrTSS.pu16[1];
1377 }
1378 }
1379 else
1380 {
1381 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1382 /* SS will be a NULL selector, but that's valid. */
1383 uNewRsp = uPtrTSS.pu64[0];
1384 uNewSS = uNewCSDpl;
1385 }
1386
1387 /* Done with the TSS now. */
1388 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
1389 if (rcStrict != VINF_SUCCESS)
1390 {
1391 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1392 return rcStrict;
1393 }
1394
1395 /* Only used outside of long mode. */
1396 cbWords = pDesc->Legacy.Gate.u5ParmCount;
1397
1398 /* If EFER.LMA is 0, there's extra work to do. */
1399 if (!IEM_IS_LONG_MODE(pVCpu))
1400 {
1401 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1402 {
1403 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1404 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1405 }
1406
1407 /* Grab the new SS descriptor. */
1408 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1409 if (rcStrict != VINF_SUCCESS)
1410 return rcStrict;
1411
1412 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1413 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1414 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1415 {
1416 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1417 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1418 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1419 }
1420
1421 /* Ensure new SS is a writable data segment. */
1422 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1423 {
1424 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1425 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1426 }
1427
1428 if (!DescSS.Legacy.Gen.u1Present)
1429 {
1430 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1431 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1432 }
1433 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1434 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1435 else
1436 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1437 }
1438 else
1439 {
1440 /* Just grab the new (NULL) SS descriptor. */
1441 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1442 * like we do... */
1443 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1444 if (rcStrict != VINF_SUCCESS)
1445 return rcStrict;
1446
1447 cbNewStack = sizeof(uint64_t) * 4;
1448 }
1449
1450 /** @todo According to Intel, new stack is checked for enough space first,
1451 * then switched. According to AMD, the stack is switched first and
1452 * then pushes might fault!
1453 * NB: OS/2 Warp 3/4 actively relies on the fact that possible
1454 * incoming stack \#PF happens before actual stack switch. AMD is
1455 * either lying or implicitly assumes that new state is committed
1456 * only if and when an instruction doesn't fault.
1457 */
1458
1459 /** @todo According to AMD, CS is loaded first, then SS.
1460 * According to Intel, it's the other way around!?
1461 */
1462
1463 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1464
1465 /* Set the accessed bit before committing new SS. */
1466 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1467 {
1468 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1469 if (rcStrict != VINF_SUCCESS)
1470 return rcStrict;
1471 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1472 }
1473
1474 /* Remember the old SS:rSP and their linear address. */
1475 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1476 uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1477
1478 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1479
1480 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1481 or #PF, the former is not implemented in this workaround. */
1482 /** @todo Properly fix call gate target stack exceptions. */
1483 /** @todo testcase: Cover callgates with partially or fully inaccessible
1484 * target stacks. */
1485 void *pvNewFrame;
1486 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1487 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1488 if (rcStrict != VINF_SUCCESS)
1489 {
1490 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1491 return rcStrict;
1492 }
1493 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
1494 if (rcStrict != VINF_SUCCESS)
1495 {
1496 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1497 return rcStrict;
1498 }
1499
1500 /* Commit new SS:rSP. */
1501 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1502 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1503 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1504 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1505 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1506 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1507 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1508 pVCpu->iem.s.uCpl = uNewCSDpl; /** @todo are the parameter words accessed using the new CPL or the old CPL? */
1509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1510 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1511
1512 /* At this point the stack access must not fail because new state was already committed. */
1513 /** @todo this can still fail because SS.LIMIT is not checked. */
1514 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1515 IEM_IS_LONG_MODE(pVCpu) ? 7
1516 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1517 &uPtrRet.pv, &uNewRsp);
1518 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1519 VERR_INTERNAL_ERROR_5);
1520
1521 if (!IEM_IS_LONG_MODE(pVCpu))
1522 {
1523 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1524 {
1525 if (cbWords)
1526 {
1527 /* Map the relevant chunk of the old stack. */
1528 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1529 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1530 if (rcStrict != VINF_SUCCESS)
1531 {
1532 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1533 return rcStrict;
1534 }
1535
1536 /* Copy the parameter (d)words. */
1537 for (int i = 0; i < cbWords; ++i)
1538 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1539
1540 /* Unmap the old stack. */
1541 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1542 if (rcStrict != VINF_SUCCESS)
1543 {
1544 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1545 return rcStrict;
1546 }
1547 }
1548
1549 /* Push the old CS:rIP. */
1550 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1551 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1552
1553 /* Push the old SS:rSP. */
1554 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1555 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1556 }
1557 else
1558 {
1559 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1560
1561 if (cbWords)
1562 {
1563 /* Map the relevant chunk of the old stack. */
1564 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1565 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1566 if (rcStrict != VINF_SUCCESS)
1567 {
1568 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1569 return rcStrict;
1570 }
1571
1572 /* Copy the parameter words. */
1573 for (int i = 0; i < cbWords; ++i)
1574 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1575
1576 /* Unmap the old stack. */
1577 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
1578 if (rcStrict != VINF_SUCCESS)
1579 {
1580 Log(("BranchCallGate: Old stack unmapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1581 return rcStrict;
1582 }
1583 }
1584
1585 /* Push the old CS:rIP. */
1586 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1587 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1588
1589 /* Push the old SS:rSP. */
1590 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1591 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1592 }
1593 }
1594 else
1595 {
1596 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1597
1598 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1599 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1600 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1601 uPtrRet.pu64[2] = uOldRsp;
1602 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1603 }
1604
1605 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1606 if (rcStrict != VINF_SUCCESS)
1607 {
1608 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1609 return rcStrict;
1610 }
1611
1612 /* Chop the high bits off if 16-bit gate (Intel says so). */
1613 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1614 uNewRip = (uint16_t)uNewRip;
1615
1616 /* Limit / canonical check. */
1617 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1618 if (!IEM_IS_LONG_MODE(pVCpu))
1619 {
1620 if (uNewRip > cbLimit)
1621 {
1622 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1623 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1624 }
1625 u64Base = X86DESC_BASE(&DescCS.Legacy);
1626 }
1627 else
1628 {
1629 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1630 if (!IEM_IS_CANONICAL(uNewRip))
1631 {
1632 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1633 return iemRaiseNotCanonical(pVCpu);
1634 }
1635 u64Base = 0;
1636 }
1637
1638 /*
1639 * Now set the accessed bit before committing the result into
1640 * CS, CSHID and RIP. (The return address and parameters have
1641 * already been pushed and committed above.)
1642 */
1643 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1644 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1645 {
1646 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1647 if (rcStrict != VINF_SUCCESS)
1648 return rcStrict;
1649 /** @todo check what VT-x and AMD-V does. */
1650 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1651 }
1652
1653 /* Commit new CS:rIP. */
1654 pVCpu->cpum.GstCtx.rip = uNewRip;
1655 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1656 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1657 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1658 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1659 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1660 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1661 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1662 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1663 }
1664 else
1665 {
1666 /* Same privilege. */
1667 /** @todo This is very similar to regular far calls; merge! */
1668
1669 /* Check stack first - may #SS(0). */
1670 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1671 * 16-bit code cause a two or four byte CS to be pushed? */
1672 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1673 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1674 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1675 IEM_IS_LONG_MODE(pVCpu) ? 7
1676 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
1677 &uPtrRet.pv, &uNewRsp);
1678 if (rcStrict != VINF_SUCCESS)
1679 return rcStrict;
1680
1681 /* Chop the high bits off if 16-bit gate (Intel says so). */
1682 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1683 uNewRip = (uint16_t)uNewRip;
1684
1685 /* Limit / canonical check. */
1686 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1687 if (!IEM_IS_LONG_MODE(pVCpu))
1688 {
1689 if (uNewRip > cbLimit)
1690 {
1691 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1692 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1693 }
1694 u64Base = X86DESC_BASE(&DescCS.Legacy);
1695 }
1696 else
1697 {
1698 if (!IEM_IS_CANONICAL(uNewRip))
1699 {
1700 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1701 return iemRaiseNotCanonical(pVCpu);
1702 }
1703 u64Base = 0;
1704 }
1705
1706 /*
1707 * Now set the accessed bit before
1708 * writing the return address to the stack and committing the result into
1709 * CS, CSHID and RIP.
1710 */
1711 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1712 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1713 {
1714 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1715 if (rcStrict != VINF_SUCCESS)
1716 return rcStrict;
1717 /** @todo check what VT-x and AMD-V does. */
1718 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1719 }
1720
1721 /* stack */
1722 if (!IEM_IS_LONG_MODE(pVCpu))
1723 {
1724 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1725 {
1726 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1727 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1728 }
1729 else
1730 {
1731 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1732 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1733 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1734 }
1735 }
1736 else
1737 {
1738 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1739 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1740 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1741 }
1742
1743 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
1744 if (rcStrict != VINF_SUCCESS)
1745 return rcStrict;
1746
1747 /* commit */
1748 pVCpu->cpum.GstCtx.rip = uNewRip;
1749 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1750 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
1751 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1752 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1753 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1754 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1755 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1756 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1757 }
1758 }
1759 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1760/** @todo single stepping */
1761
1762 /* Flush the prefetch buffer. */
1763 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1764 return VINF_SUCCESS;
1765#endif /* IEM_IMPLEMENTS_CALLGATE */
1766}
1767
1768
1769/**
1770 * Implements far jumps and calls thru system selectors.
1771 *
1772 * @returns VBox strict status code.
1773 * @param pVCpu The cross context virtual CPU structure of the
1774 * calling thread.
1775 * @param cbInstr The current instruction length.
1776 * @param uSel The selector.
1777 * @param enmBranch The kind of branching we're performing.
1778 * @param enmEffOpSize The effective operand size.
1779 * @param pDesc The descriptor corresponding to @a uSel.
1780 */
1781static VBOXSTRICTRC iemCImpl_BranchSysSel(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1782 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1783{
1784 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1785 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1786 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1787
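/* In long mode the only system descriptor a far jmp/call may reference is the 64-bit call gate; everything else gets #GP(sel). */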
1788 if (IEM_IS_LONG_MODE(pVCpu))
1789 switch (pDesc->Legacy.Gen.u4Type)
1790 {
1791 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1792 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1793
1794 default:
1795 case AMD64_SEL_TYPE_SYS_LDT:
1796 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1797 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1798 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1799 case AMD64_SEL_TYPE_SYS_INT_GATE:
1800 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1801 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1802 }
1803
1804 switch (pDesc->Legacy.Gen.u4Type)
1805 {
1806 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1807 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1808 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1809
1810 case X86_SEL_TYPE_SYS_TASK_GATE:
1811 return iemCImpl_BranchTaskGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1812
1813 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1814 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1815 return iemCImpl_BranchTaskSegment(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1816
1817 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1818 Log(("branch %04x -> busy 286 TSS\n", uSel));
1819 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1820
1821 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1822 Log(("branch %04x -> busy 386 TSS\n", uSel));
1823 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1824
1825 default:
1826 case X86_SEL_TYPE_SYS_LDT:
1827 case X86_SEL_TYPE_SYS_286_INT_GATE:
1828 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1829 case X86_SEL_TYPE_SYS_386_INT_GATE:
1830 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1831 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1832 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1833 }
1834}
1835
1836
1837/**
1838 * Implements far jumps.
1839 *
1840 * @param uSel The selector.
1841 * @param offSeg The segment offset.
1842 * @param enmEffOpSize The effective operand size.
1843 */
1844IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1845{
1846 NOREF(cbInstr);
1847 Assert(offSeg <= UINT32_MAX);
1848
1849 /*
1850 * Real mode and V8086 mode are easy. The only snag seems to be that
1851 * CS.limit doesn't change and the limit check is done against the current
1852 * limit.
1853 */
1854 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1855 * 1998) that up to and including the Intel 486, far control
1856 * transfers in real mode set default CS attributes (0x93) and also
1857 * set a 64K segment limit. Starting with the Pentium, the
1858 * attributes and limit are left alone but the access rights are
1859 * ignored. We only implement the Pentium+ behavior.
1860 * */
1861 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1862 {
1863 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1864 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1865 {
1866 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1867 return iemRaiseGeneralProtectionFault0(pVCpu);
1868 }
1869
1870 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1871 pVCpu->cpum.GstCtx.rip = offSeg;
1872 else
1873 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1874 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1875 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1876 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1877 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1878
1879 return iemRegFinishClearingRF(pVCpu);
1880 }
1881
1882 /*
1883 * Protected mode. Need to parse the specified descriptor...
1884 */
1885 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1886 {
1887 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1888 return iemRaiseGeneralProtectionFault0(pVCpu);
1889 }
1890
1891 /* Fetch the descriptor. */
1892 IEMSELDESC Desc;
1893 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1894 if (rcStrict != VINF_SUCCESS)
1895 return rcStrict;
1896
1897 /* Is it there? */
1898 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1899 {
1900 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1901 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1902 }
1903
1904 /*
1905 * Deal with it according to its type. We do the standard code selectors
1906 * here and dispatch the system selectors to worker functions.
1907 */
1908 if (!Desc.Legacy.Gen.u1DescType)
1909 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1910
1911 /* Only code segments. */
1912 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1913 {
1914 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1915 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1916 }
1917
1918 /* L vs D. */
1919 if ( Desc.Legacy.Gen.u1Long
1920 && Desc.Legacy.Gen.u1DefBig
1921 && IEM_IS_LONG_MODE(pVCpu))
1922 {
1923 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1924 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1925 }
1926
1927 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1928 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1929 {
1930 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
1931 {
1932 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1933 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1934 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1935 }
1936 }
1937 else
1938 {
1939 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
1940 {
1941 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
1942 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1943 }
1944 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
1945 {
1946 Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
1947 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1948 }
1949 }
1950
1951 /* Chop the high bits if 16-bit (Intel says so). */
1952 if (enmEffOpSize == IEMMODE_16BIT)
1953 offSeg &= UINT16_MAX;
1954
1955 /* Limit check and get the base. */
1956 uint64_t u64Base;
1957 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1958 if ( !Desc.Legacy.Gen.u1Long
1959 || !IEM_IS_LONG_MODE(pVCpu))
1960 {
1961 if (RT_LIKELY(offSeg <= cbLimit))
1962 u64Base = X86DESC_BASE(&Desc.Legacy);
1963 else
1964 {
1965 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1966 /** @todo Intel says this is \#GP(0)! */
1967 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1968 }
1969 }
1970 else
1971 u64Base = 0;
1972
1973 /*
1974 * Ok, everything checked out fine. Now set the accessed bit before
1975 * committing the result into CS, CSHID and RIP.
1976 */
1977 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1978 {
1979 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1980 if (rcStrict != VINF_SUCCESS)
1981 return rcStrict;
1982 /** @todo check what VT-x and AMD-V does. */
1983 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1984 }
1985
1986 /* commit */
1987 pVCpu->cpum.GstCtx.rip = offSeg;
1988 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1989 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */
1990 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1991 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1992 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1993 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1994 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1995 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1996 /** @todo check if the hidden bits are loaded correctly for 64-bit
1997 * mode. */
1998
1999 /* Flush the prefetch buffer. */
2000 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2001
2002 return iemRegFinishClearingRF(pVCpu);
2003}
2004
2005
2006/**
2007 * Implements far calls.
2008 *
2009 * This is very similar to iemCImpl_FarJmp.
2010 *
2011 * @param uSel The selector.
2012 * @param offSeg The segment offset.
2013 * @param enmEffOpSize The operand size (in case we need it).
2014 */
2015IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
2016{
2017 VBOXSTRICTRC rcStrict;
2018 uint64_t uNewRsp;
2019 RTPTRUNION uPtrRet;
2020
2021 /*
2022 * Real mode and V8086 mode are easy. The only snag seems to be that
2023 * CS.limit doesn't change and the limit check is done against the current
2024 * limit.
2025 */
2026 /** @todo See comment for similar code in iemCImpl_FarJmp */
2027 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2028 {
2029 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
2030
2031 /* Check stack first - may #SS(0). */
2032 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2033 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2034 &uPtrRet.pv, &uNewRsp);
2035 if (rcStrict != VINF_SUCCESS)
2036 return rcStrict;
2037
2038 /* Check the target address range. */
2039/** @todo this must be wrong! Write unreal mode tests! */
2040 if (offSeg > UINT32_MAX)
2041 return iemRaiseGeneralProtectionFault0(pVCpu);
2042
2043 /* Everything is fine, push the return address. */
2044 if (enmEffOpSize == IEMMODE_16BIT)
2045 {
2046 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2047 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2048 }
2049 else
2050 {
2051 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2052 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2053 }
2054 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2055 if (rcStrict != VINF_SUCCESS)
2056 return rcStrict;
2057
2058 /* Branch. */
2059 pVCpu->cpum.GstCtx.rip = offSeg;
2060 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2061 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2062 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2063 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2064
2065 return iemRegFinishClearingRF(pVCpu);
2066 }
2067
2068 /*
2069 * Protected mode. Need to parse the specified descriptor...
2070 */
2071 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2072 {
2073 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2074 return iemRaiseGeneralProtectionFault0(pVCpu);
2075 }
2076
2077 /* Fetch the descriptor. */
2078 IEMSELDESC Desc;
2079 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2080 if (rcStrict != VINF_SUCCESS)
2081 return rcStrict;
2082
2083 /*
2084 * Deal with it according to its type. We do the standard code selectors
2085 * here and dispatch the system selectors to worker functions.
2086 */
2087 if (!Desc.Legacy.Gen.u1DescType)
2088 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2089
2090 /* Only code segments. */
2091 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2092 {
2093 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2094 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2095 }
2096
2097 /* L vs D. */
2098 if ( Desc.Legacy.Gen.u1Long
2099 && Desc.Legacy.Gen.u1DefBig
2100 && IEM_IS_LONG_MODE(pVCpu))
2101 {
2102 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2103 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2104 }
2105
2106 /* DPL/RPL/CPL check, where conforming segments make a difference. */
2107 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2108 {
2109 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl)
2110 {
2111 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2112 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2113 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2114 }
2115 }
2116 else
2117 {
2118 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl)
2119 {
2120 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2121 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2122 }
2123 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl)
2124 {
2125 Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl));
2126 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2127 }
2128 }
2129
2130 /* Is it there? */
2131 if (!Desc.Legacy.Gen.u1Present)
2132 {
2133 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2134 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2135 }
2136
2137 /* Check stack first - may #SS(0). */
2138 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2139 * 16-bit code cause a two or four byte CS to be pushed? */
2140 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2141 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2142 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2143 &uPtrRet.pv, &uNewRsp);
2144 if (rcStrict != VINF_SUCCESS)
2145 return rcStrict;
2146
2147 /* Chop the high bits if 16-bit (Intel says so). */
2148 if (enmEffOpSize == IEMMODE_16BIT)
2149 offSeg &= UINT16_MAX;
2150
2151 /* Limit / canonical check. */
2152 uint64_t u64Base;
2153 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2154 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2155 {
2156 if (!IEM_IS_CANONICAL(offSeg))
2157 {
2158 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2159 return iemRaiseNotCanonical(pVCpu);
2160 }
2161 u64Base = 0;
2162 }
2163 else
2164 {
2165 if (offSeg > cbLimit)
2166 {
2167 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2168 /** @todo Intel says this is \#GP(0)! */
2169 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2170 }
2171 u64Base = X86DESC_BASE(&Desc.Legacy);
2172 }
2173
2174 /*
2175 * Now set the accessed bit before
2176 * writing the return address to the stack and committing the result into
2177 * CS, CSHID and RIP.
2178 */
2179 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2180 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2181 {
2182 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2183 if (rcStrict != VINF_SUCCESS)
2184 return rcStrict;
2185 /** @todo check what VT-x and AMD-V does. */
2186 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2187 }
2188
2189 /* stack */
2190 if (enmEffOpSize == IEMMODE_16BIT)
2191 {
2192 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2193 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2194 }
2195 else if (enmEffOpSize == IEMMODE_32BIT)
2196 {
2197 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2198 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2199 }
2200 else
2201 {
2202 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2203 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2204 }
2205 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
2206 if (rcStrict != VINF_SUCCESS)
2207 return rcStrict;
2208
2209 /* commit */
2210 pVCpu->cpum.GstCtx.rip = offSeg;
2211 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2212 pVCpu->cpum.GstCtx.cs.Sel |= pVCpu->iem.s.uCpl;
2213 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2214 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2215 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2216 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2217 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2218 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2219 /** @todo check if the hidden bits are loaded correctly for 64-bit
2220 * mode. */
2221
2222 /* Flush the prefetch buffer. */
2223 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2224
2225 return iemRegFinishClearingRF(pVCpu);
2226}
2227
2228
2229/**
2230 * Implements retf.
2231 *
2232 * @param enmEffOpSize The effective operand size.
2233 * @param cbPop The number of bytes of arguments to pop
2234 * from the stack.
2235 */
2236IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2237{
2238 VBOXSTRICTRC rcStrict;
2239 RTCPTRUNION uPtrFrame;
2240 uint64_t uNewRsp;
2241 uint64_t uNewRip;
2242 uint16_t uNewCs;
2243 NOREF(cbInstr);
2244
2245 /*
2246 * Read the stack values first.
2247 */
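/* The return frame is IP/EIP/RIP followed by CS in a word/dword/qword slot, i.e. 4, 8 or 16 bytes. */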
2248 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2249 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2250 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2251 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2252 &uPtrFrame.pv, &uNewRsp);
2253 if (rcStrict != VINF_SUCCESS)
2254 return rcStrict;
2255 if (enmEffOpSize == IEMMODE_16BIT)
2256 {
2257 uNewRip = uPtrFrame.pu16[0];
2258 uNewCs = uPtrFrame.pu16[1];
2259 }
2260 else if (enmEffOpSize == IEMMODE_32BIT)
2261 {
2262 uNewRip = uPtrFrame.pu32[0];
2263 uNewCs = uPtrFrame.pu16[2];
2264 }
2265 else
2266 {
2267 uNewRip = uPtrFrame.pu64[0];
2268 uNewCs = uPtrFrame.pu16[4];
2269 }
2270 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2271 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2272 { /* extremely likely */ }
2273 else
2274 return rcStrict;
2275
2276 /*
2277 * Real mode and V8086 mode are easy.
2278 */
2279 /** @todo See comment for similar code in iemCImpl_FarJmp */
2280 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2281 {
2282 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2283 /** @todo check how this is supposed to work if sp=0xfffe. */
2284
2285 /* Check the limit of the new EIP. */
2286 /** @todo Intel pseudo code only does the limit check for 16-bit
2287 * operands, AMD does not make any distinction. What is right? */
2288 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2289 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2290
2291 /* commit the operation. */
2292 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2293 pVCpu->cpum.GstCtx.rip = uNewRip;
2294 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2295 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2296 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2297 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2298 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2299 if (cbPop)
2300 iemRegAddToRsp(pVCpu, cbPop);
2301 return VINF_SUCCESS;
2302 }
2303
2304 /*
2305 * Protected mode is complicated, of course.
2306 */
2307 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2308 {
2309 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2310 return iemRaiseGeneralProtectionFault0(pVCpu);
2311 }
2312
2313 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2314
2315 /* Fetch the descriptor. */
2316 IEMSELDESC DescCs;
2317 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2318 if (rcStrict != VINF_SUCCESS)
2319 return rcStrict;
2320
2321 /* Can only return to a code selector. */
2322 if ( !DescCs.Legacy.Gen.u1DescType
2323 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2324 {
2325 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2326 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2327 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2328 }
2329
2330 /* L vs D. */
2331 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2332 && DescCs.Legacy.Gen.u1DefBig
2333 && IEM_IS_LONG_MODE(pVCpu))
2334 {
2335 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2336 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2337 }
2338
2339 /* DPL/RPL/CPL checks. */
2340 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
2341 {
2342 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
2343 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2344 }
2345
2346 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2347 {
2348 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2349 {
2350 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2351 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2352 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2353 }
2354 }
2355 else
2356 {
2357 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2358 {
2359 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2360 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2361 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2362 }
2363 }
2364
2365 /* Is it there? */
2366 if (!DescCs.Legacy.Gen.u1Present)
2367 {
2368 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2369 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2370 }
2371
2372 /*
2373 * Return to outer privilege? (We'll typically have entered via a call gate.)
2374 */
2375 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
2376 {
2377 /* Read the outer stack pointer stored *after* the parameters. */
2378 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, uNewRsp);
2379 if (rcStrict != VINF_SUCCESS)
2380 return rcStrict;
2381
2382 uint16_t uNewOuterSs;
2383 uint64_t uNewOuterRsp;
2384 if (enmEffOpSize == IEMMODE_16BIT)
2385 {
2386 uNewOuterRsp = uPtrFrame.pu16[0];
2387 uNewOuterSs = uPtrFrame.pu16[1];
2388 }
2389 else if (enmEffOpSize == IEMMODE_32BIT)
2390 {
2391 uNewOuterRsp = uPtrFrame.pu32[0];
2392 uNewOuterSs = uPtrFrame.pu16[2];
2393 }
2394 else
2395 {
2396 uNewOuterRsp = uPtrFrame.pu64[0];
2397 uNewOuterSs = uPtrFrame.pu16[4];
2398 }
2399 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
2400 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2401 { /* extremely likely */ }
2402 else
2403 return rcStrict;
2404
2405 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2406 and read the selector. */
2407 IEMSELDESC DescSs;
2408 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2409 {
2410 if ( !DescCs.Legacy.Gen.u1Long
2411 || (uNewOuterSs & X86_SEL_RPL) == 3)
2412 {
2413 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2414 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2415 return iemRaiseGeneralProtectionFault0(pVCpu);
2416 }
2417 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2418 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2419 }
2420 else
2421 {
2422 /* Fetch the descriptor for the new stack segment. */
2423 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2424 if (rcStrict != VINF_SUCCESS)
2425 return rcStrict;
2426 }
2427
2428 /* Check that RPL of stack and code selectors match. */
2429 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2430 {
2431 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2432 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2433 }
2434
2435 /* Must be a writable data segment. */
2436 if ( !DescSs.Legacy.Gen.u1DescType
2437 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2438 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2439 {
2440 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2441 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2442 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2443 }
2444
2445 /* L vs D. (Not mentioned by intel.) */
2446 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2447 && DescSs.Legacy.Gen.u1DefBig
2448 && IEM_IS_LONG_MODE(pVCpu))
2449 {
2450 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2451 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2452 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2453 }
2454
2455 /* DPL/RPL/CPL checks. */
2456 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2457 {
2458 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2459 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2460 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2461 }
2462
2463 /* Is it there? */
2464 if (!DescSs.Legacy.Gen.u1Present)
2465 {
2466 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2467 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2468 }
2469
2470 /* Calc SS limit. */
2471 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2472
2473 /* Is RIP canonical or within CS.limit? */
2474 uint64_t u64Base;
2475 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2476
2477 /** @todo Testcase: Is this correct? */
2478 if ( DescCs.Legacy.Gen.u1Long
2479 && IEM_IS_LONG_MODE(pVCpu) )
2480 {
2481 if (!IEM_IS_CANONICAL(uNewRip))
2482 {
2483 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
2484 return iemRaiseNotCanonical(pVCpu);
2485 }
2486 u64Base = 0;
2487 }
2488 else
2489 {
2490 if (uNewRip > cbLimitCs)
2491 {
2492 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2493 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
2494 /** @todo Intel says this is \#GP(0)! */
2495 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2496 }
2497 u64Base = X86DESC_BASE(&DescCs.Legacy);
2498 }
2499
2500 /*
2501 * Now set the accessed bits of both CS and SS before committing
2502 * the result into CS, SS, CSHID and RIP. (Nothing is written to
2503 * the stack here; retf only pops.)
2504 */
2505 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2506 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2507 {
2508 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2509 if (rcStrict != VINF_SUCCESS)
2510 return rcStrict;
2511 /** @todo check what VT-x and AMD-V does. */
2512 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2513 }
2514 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2515 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2516 {
2517 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2518 if (rcStrict != VINF_SUCCESS)
2519 return rcStrict;
2520 /** @todo check what VT-x and AMD-V does. */
2521 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2522 }
2523
2524 /* commit */
2525 if (enmEffOpSize == IEMMODE_16BIT)
2526 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2527 else
2528 pVCpu->cpum.GstCtx.rip = uNewRip;
2529 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2530 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2531 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2532 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2533 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2534 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2535 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2536 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2537 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2538 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2539 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2540 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2541 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2542 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2543 else
2544 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
2545 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2546 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewOuterRsp;
2547 else
2548 pVCpu->cpum.GstCtx.rsp = uNewOuterRsp;
2549
2550 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
2551 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2552 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2553 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2554 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2555
2556 /** @todo check if the hidden bits are loaded correctly for 64-bit
2557 * mode. */
2558
2559 if (cbPop)
2560 iemRegAddToRsp(pVCpu, cbPop);
2561 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2562
2563 /* Done! */
2564 }
2565 /*
2566 * Return to the same privilege level
2567 */
2568 else
2569 {
2570 /* Limit / canonical check. */
2571 uint64_t u64Base;
2572 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2573
2574 /** @todo Testcase: Is this correct? */
2575 if ( DescCs.Legacy.Gen.u1Long
2576 && IEM_IS_LONG_MODE(pVCpu) )
2577 {
2578 if (!IEM_IS_CANONICAL(uNewRip))
2579 {
2580 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2581 return iemRaiseNotCanonical(pVCpu);
2582 }
2583 u64Base = 0;
2584 }
2585 else
2586 {
2587 if (uNewRip > cbLimitCs)
2588 {
2589 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2590 /** @todo Intel says this is \#GP(0)! */
2591 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2592 }
2593 u64Base = X86DESC_BASE(&DescCs.Legacy);
2594 }
2595
2596 /*
2597 * Now set the accessed bit before committing the result into
2598 * CS, CSHID and RIP. (Nothing is written to the stack here;
2599 * retf only pops.)
2600 */
2601 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2602 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2603 {
2604 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2605 if (rcStrict != VINF_SUCCESS)
2606 return rcStrict;
2607 /** @todo check what VT-x and AMD-V does. */
2608 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2609 }
2610
2611 /* commit */
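/* With a 16-bit stack segment only SP is updated; the upper RSP bits are left untouched. */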
2612 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2613 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
2614 else
2615 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2616 if (enmEffOpSize == IEMMODE_16BIT)
2617 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2618 else
2619 pVCpu->cpum.GstCtx.rip = uNewRip;
2620 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2621 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2622 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2623 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2624 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2625 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2626 /** @todo check if the hidden bits are loaded correctly for 64-bit
2627 * mode. */
2628 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
2629 if (cbPop)
2630 iemRegAddToRsp(pVCpu, cbPop);
2631 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2632 }
2633
2634 /* Flush the prefetch buffer. */
2635 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo use light flush for same privilege? */
2636 return VINF_SUCCESS;
2637}
2638
2639
2640/**
2641 * Implements retn.
2642 *
2643 * We're doing this in C because of the \#GP that might be raised if the popped
2644 * program counter is out of bounds.
2645 *
2646 * @param enmEffOpSize The effective operand size.
2647 * @param cbPop The number of bytes of arguments to pop
2648 * from the stack.
2649 */
2650IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2651{
2652 NOREF(cbInstr);
2653
2654 /* Fetch the RSP from the stack. */
2655 VBOXSTRICTRC rcStrict;
2656 RTUINT64U NewRip;
2657 RTUINT64U NewRsp;
2658 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2659
2660 switch (enmEffOpSize)
2661 {
2662 case IEMMODE_16BIT:
2663 NewRip.u = 0;
2664 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
2665 break;
2666 case IEMMODE_32BIT:
2667 NewRip.u = 0;
2668 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
2669 break;
2670 case IEMMODE_64BIT:
2671 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
2672 break;
2673 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2674 }
2675 if (rcStrict != VINF_SUCCESS)
2676 return rcStrict;
2677
2678 /* Check the new RSP before loading it. */
2679 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
2680 * of it. The canonical test is performed here and for call. */
2681 if (enmEffOpSize != IEMMODE_64BIT)
2682 {
2683 if (NewRip.DWords.dw0 > pVCpu->cpum.GstCtx.cs.u32Limit)
2684 {
2685 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
2686 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2687 }
2688 }
2689 else
2690 {
2691 if (!IEM_IS_CANONICAL(NewRip.u))
2692 {
2693 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
2694 return iemRaiseNotCanonical(pVCpu);
2695 }
2696 }
2697
2698 /* Apply cbPop */
2699 if (cbPop)
2700 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2701
2702 /* Commit it. */
2703 pVCpu->cpum.GstCtx.rip = NewRip.u;
2704 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2705 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
2706
2707 /* Flush the prefetch buffer. */
2708 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo only need a light flush here, don't we? We don't really need any flushing... */
2709 return VINF_SUCCESS;
2710}
2711
2712
2713/**
2714 * Implements enter.
2715 *
2716 * We're doing this in C because the instruction is insane; even for the
2717 * u8NestingLevel=0 case, dealing with the stack is tedious.
2718 *
2719 * @param enmEffOpSize The effective operand size.
2720 * @param cbFrame Frame size.
2721 * @param cParameters Frame parameter count.
2722 */
2723IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2724{
2725 /* Push RBP, saving the old value in TmpRbp. */
2726 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2727 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2728 RTUINT64U NewRbp;
2729 VBOXSTRICTRC rcStrict;
2730 if (enmEffOpSize == IEMMODE_64BIT)
2731 {
2732 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2733 NewRbp = NewRsp;
2734 }
2735 else if (enmEffOpSize == IEMMODE_32BIT)
2736 {
2737 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2738 NewRbp = NewRsp;
2739 }
2740 else
2741 {
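/* 16-bit frame: push BP and update only the low word of RBP. */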
2742 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2743 NewRbp = TmpRbp;
2744 NewRbp.Words.w0 = NewRsp.Words.w0;
2745 }
2746 if (rcStrict != VINF_SUCCESS)
2747 return rcStrict;
2748
2749 /* Copy the parameters (aka nesting levels by Intel). */
2750 cParameters &= 0x1f;
2751 if (cParameters > 0)
2752 {
2753 switch (enmEffOpSize)
2754 {
2755 case IEMMODE_16BIT:
2756 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2757 TmpRbp.DWords.dw0 -= 2;
2758 else
2759 TmpRbp.Words.w0 -= 2;
2760 do
2761 {
2762 uint16_t u16Tmp;
2763 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2764 if (rcStrict != VINF_SUCCESS)
2765 break;
2766 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2767 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2768 break;
2769
2770 case IEMMODE_32BIT:
2771 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2772 TmpRbp.DWords.dw0 -= 4;
2773 else
2774 TmpRbp.Words.w0 -= 4;
2775 do
2776 {
2777 uint32_t u32Tmp;
2778 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2779 if (rcStrict != VINF_SUCCESS)
2780 break;
2781 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2782 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2783 break;
2784
2785 case IEMMODE_64BIT:
2786 TmpRbp.u -= 8;
2787 do
2788 {
2789 uint64_t u64Tmp;
2790 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2791 if (rcStrict != VINF_SUCCESS)
2792 break;
2793 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2794 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2795 break;
2796
2797 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2798 }
2799 if (rcStrict != VINF_SUCCESS)
2800 return rcStrict;
2801
2802 /* Push the new RBP */
2803 if (enmEffOpSize == IEMMODE_64BIT)
2804 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2805 else if (enmEffOpSize == IEMMODE_32BIT)
2806 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2807 else
2808 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2809 if (rcStrict != VINF_SUCCESS)
2810 return rcStrict;
2811
2812 }
2813
2814 /* Recalc RSP. */
2815 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2816
2817 /** @todo Should probe write access at the new RSP according to AMD. */
2818 /** @todo Should handle accesses to the VMX APIC-access page. */
2819
2820 /* Commit it. */
2821 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2822 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2823 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2824}
2825
2826
2827
2828/**
2829 * Implements leave.
2830 *
2831 * We're doing this in C because messing with the stack registers is annoying
2832 * since they depend on SS attributes.
2833 *
2834 * @param enmEffOpSize The effective operand size.
2835 */
2836IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2837{
2838 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2839 RTUINT64U NewRsp;
2840 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2841 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2842 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2843 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2844 else
2845 {
2846 /** @todo Check that LEAVE actually preserve the high EBP bits. */
2847 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2848 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2849 }
2850
2851 /* Pop RBP according to the operand size. */
2852 VBOXSTRICTRC rcStrict;
2853 RTUINT64U NewRbp;
2854 switch (enmEffOpSize)
2855 {
2856 case IEMMODE_16BIT:
2857 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2858 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2859 break;
2860 case IEMMODE_32BIT:
2861 NewRbp.u = 0;
2862 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2863 break;
2864 case IEMMODE_64BIT:
2865 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2866 break;
2867 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2868 }
2869 if (rcStrict != VINF_SUCCESS)
2870 return rcStrict;
2871
2872
2873 /* Commit it. */
2874 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2875 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2876 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2877}
2878
2879
2880/**
2881 * Implements int3 and int XX.
2882 *
2883 * @param u8Int The interrupt vector number.
2884 * @param enmInt The int instruction type.
2885 */
2886IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2887{
2888 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2889
2890 /*
2891 * We must check if this INT3 might belong to DBGF before raising a #BP.
2892 */
2893 if (u8Int == 3)
2894 {
2895 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2896 if (pVM->dbgf.ro.cEnabledInt3Breakpoints == 0)
2897 { /* likely: No vbox debugger breakpoints */ }
2898 else
2899 {
2900 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx);
2901 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2902 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
2903 return iemSetPassUpStatus(pVCpu, rcStrict);
2904 }
2905 }
2906/** @todo single stepping */
2907 return iemRaiseXcptOrInt(pVCpu,
2908 cbInstr,
2909 u8Int,
2910 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2911 0,
2912 0);
2913}
2914
2915
2916/**
2917 * Implements iret for real mode and V8086 mode.
2918 *
2919 * @param enmEffOpSize The effective operand size.
2920 */
2921IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2922{
2923 X86EFLAGS Efl;
2924 Efl.u = IEMMISC_GET_EFL(pVCpu);
2925 NOREF(cbInstr);
2926
2927 /*
2928 * In V8086 mode with IOPL != 3, iret raises #GP(0) unless VME is enabled.
2929 */
2930 if ( Efl.Bits.u1VM
2931 && Efl.Bits.u2IOPL != 3
2932 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
2933 return iemRaiseGeneralProtectionFault0(pVCpu);
2934
2935 /*
2936 * Do the stack bits, but don't commit RSP before everything checks
2937 * out right.
2938 */
2939 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2940 VBOXSTRICTRC rcStrict;
2941 RTCPTRUNION uFrame;
2942 uint16_t uNewCs;
2943 uint32_t uNewEip;
2944 uint32_t uNewFlags;
2945 uint64_t uNewRsp;
2946 if (enmEffOpSize == IEMMODE_32BIT)
2947 {
2948 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
2949 if (rcStrict != VINF_SUCCESS)
2950 return rcStrict;
2951 uNewEip = uFrame.pu32[0];
2952 if (uNewEip > UINT16_MAX)
2953 return iemRaiseGeneralProtectionFault0(pVCpu);
2954
2955 uNewCs = (uint16_t)uFrame.pu32[1];
2956 uNewFlags = uFrame.pu32[2];
2957 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2958 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2959 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2960 | X86_EFL_ID;
2961 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2962 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2963 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2964 }
2965 else
2966 {
2967 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
2968 if (rcStrict != VINF_SUCCESS)
2969 return rcStrict;
2970 uNewEip = uFrame.pu16[0];
2971 uNewCs = uFrame.pu16[1];
2972 uNewFlags = uFrame.pu16[2];
2973 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2974 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2975 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2976 /** @todo The intel pseudo code does not indicate what happens to
2977 * reserved flags. We just ignore them. */
2978 /* Ancient CPU adjustments: See iemCImpl_popf. */
2979 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2980 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2981 }
2982 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
2983 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2984 { /* extremely likely */ }
2985 else
2986 return rcStrict;
2987
2988 /** @todo Check how this is supposed to work if sp=0xfffe. */
2989 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2990 uNewCs, uNewEip, uNewFlags, uNewRsp));
2991
2992 /*
2993 * Check the limit of the new EIP.
2994 */
2995 /** @todo Only the AMD pseudo code checks the limit here, what's
2996 * right? */
2997 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
2998 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2999
3000 /*
3001 * V8086 checks and flag adjustments
3002 */
3003 if (Efl.Bits.u1VM)
3004 {
3005 if (Efl.Bits.u2IOPL == 3)
3006 {
3007 /* Preserve IOPL and clear RF. */
3008 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
3009 uNewFlags |= Efl.u & (X86_EFL_IOPL);
3010 }
3011 else if ( enmEffOpSize == IEMMODE_16BIT
3012 && ( !(uNewFlags & X86_EFL_IF)
3013 || !Efl.Bits.u1VIP )
3014 && !(uNewFlags & X86_EFL_TF) )
3015 {
3016 /* Move IF to VIF, clear RF and preserve IF and IOPL. */
3017 uNewFlags &= ~X86_EFL_VIF;
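/* Note: IF is EFLAGS bit 9 and VIF is bit 19, hence the shift by 10. */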
3018 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
3019 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
3020 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
3021 }
3022 else
3023 return iemRaiseGeneralProtectionFault0(pVCpu);
3024 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
3025 }
3026
3027 /*
3028 * Commit the operation.
3029 */
3030#ifdef DBGFTRACE_ENABLED
3031 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
3032 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
3033#endif
3034 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3035 pVCpu->cpum.GstCtx.rip = uNewEip;
3036 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3037 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3038 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3039 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
3040 /** @todo do we load attribs and limit as well? */
3041 Assert(uNewFlags & X86_EFL_1);
3042 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3043
3044 /* Flush the prefetch buffer. */
3045 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo can do light flush in real mode at least */
3046
3047/** @todo single stepping */
3048 return VINF_SUCCESS;
3049}
3050
3051
3052/**
3053 * Loads a segment register when entering V8086 mode.
3054 *
3055 * @param pSReg The segment register.
3056 * @param uSeg The segment to load.
3057 */
3058static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
3059{
3060 pSReg->Sel = uSeg;
3061 pSReg->ValidSel = uSeg;
3062 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3063 pSReg->u64Base = (uint32_t)uSeg << 4;
3064 pSReg->u32Limit = 0xffff;
3065 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
3066 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
3067 * IRET'ing to V8086. */
3068}
3069
3070
3071/**
3072 * Implements iret for protected mode returning to V8086 mode.
3073 *
3074 * @param uNewEip The new EIP.
3075 * @param uNewCs The new CS.
3076 * @param uNewFlags The new EFLAGS.
3077 * @param uNewRsp The RSP after the initial IRET frame.
3078 *
3079 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
3080 */
3081IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
3082{
3083 RT_NOREF_PV(cbInstr);
3084 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
3085
3086 /*
3087 * Pop the V8086 specific frame bits off the stack.
3088 */
3089 VBOXSTRICTRC rcStrict;
3090 RTCPTRUNION uFrame;
3091 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);
3092 if (rcStrict != VINF_SUCCESS)
3093 return rcStrict;
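/* Beyond EIP, CS and EFLAGS the V8086 frame holds ESP, SS, ES, DS, FS and GS, one dword each with the selectors in the low words. */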
3094 uint32_t uNewEsp = uFrame.pu32[0];
3095 uint16_t uNewSs = uFrame.pu32[1];
3096 uint16_t uNewEs = uFrame.pu32[2];
3097 uint16_t uNewDs = uFrame.pu32[3];
3098 uint16_t uNewFs = uFrame.pu32[4];
3099 uint16_t uNewGs = uFrame.pu32[5];
3100 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
3101 if (rcStrict != VINF_SUCCESS)
3102 return rcStrict;
3103
3104 /*
3105 * Commit the operation.
3106 */
3107 uNewFlags &= X86_EFL_LIVE_MASK;
3108 uNewFlags |= X86_EFL_RA1_MASK;
3109#ifdef DBGFTRACE_ENABLED
3110 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
3111 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
3112#endif
3113 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
3114
3115 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3116 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3117 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3118 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3119 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3120 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3121 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3122 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3123 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3124 pVCpu->iem.s.uCpl = 3;
3125
3126 /* Flush the prefetch buffer. */
3127 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3128
3129/** @todo single stepping */
3130 return VINF_SUCCESS;
3131}
3132
3133
3134/**
3135 * Implements iret for protected mode returning via a nested task.
3136 *
3137 * @param enmEffOpSize The effective operand size.
3138 */
3139IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3140{
3141 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3142#ifndef IEM_IMPLEMENTS_TASKSWITCH
3143 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3144#else
3145 RT_NOREF_PV(enmEffOpSize);
3146
3147 /*
3148 * Read the segment selector in the link-field of the current TSS.
3149 */
3150 RTSEL uSelRet;
3151 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3152 if (rcStrict != VINF_SUCCESS)
3153 return rcStrict;
3154
3155 /*
3156 * Fetch the returning task's TSS descriptor from the GDT.
3157 */
3158 if (uSelRet & X86_SEL_LDT)
3159 {
3160 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3161 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3162 }
3163
3164 IEMSELDESC TssDesc;
3165 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3166 if (rcStrict != VINF_SUCCESS)
3167 return rcStrict;
3168
3169 if (TssDesc.Legacy.Gate.u1DescType)
3170 {
3171 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3172 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3173 }
3174
3175 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3176 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3177 {
3178 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3179 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3180 }
3181
3182 if (!TssDesc.Legacy.Gate.u1Present)
3183 {
3184 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3185 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3186 }
3187
3188 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3189 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3190 0 /* uCr2 */, uSelRet, &TssDesc);
3191#endif
3192}
3193
3194
3195/**
3196 * Implements iret for protected mode.
3197 *
3198 * @param enmEffOpSize The effective operand size.
3199 */
3200IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3201{
3202 NOREF(cbInstr);
3203 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3204
3205 /*
3206 * Nested task return.
3207 */
3208 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3209 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3210
3211 /*
3212 * Normal return.
3213 *
3214 * Do the stack bits, but don't commit RSP before everything checks
3215 * out right.
3216 */
3217 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3218 VBOXSTRICTRC rcStrict;
3219 RTCPTRUNION uFrame;
3220 uint16_t uNewCs;
3221 uint32_t uNewEip;
3222 uint32_t uNewFlags;
3223 uint64_t uNewRsp;
3224 if (enmEffOpSize == IEMMODE_32BIT)
3225 {
3226 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
3227 if (rcStrict != VINF_SUCCESS)
3228 return rcStrict;
3229 uNewEip = uFrame.pu32[0];
3230 uNewCs = (uint16_t)uFrame.pu32[1];
3231 uNewFlags = uFrame.pu32[2];
3232 }
3233 else
3234 {
3235 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
3236 if (rcStrict != VINF_SUCCESS)
3237 return rcStrict;
3238 uNewEip = uFrame.pu16[0];
3239 uNewCs = uFrame.pu16[1];
3240 uNewFlags = uFrame.pu16[2];
3241 }
3242 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3243 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3244 { /* extremely likely */ }
3245 else
3246 return rcStrict;
3247 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, pVCpu->iem.s.uCpl));
3248
3249 /*
3250 * We're hopefully not returning to V8086 mode...
3251 */
3252 if ( (uNewFlags & X86_EFL_VM)
3253 && pVCpu->iem.s.uCpl == 0)
3254 {
3255 Assert(enmEffOpSize == IEMMODE_32BIT);
3256 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3257 }
3258
3259 /*
3260 * Protected mode.
3261 */
3262 /* Read the CS descriptor. */
3263 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3264 {
3265 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3266 return iemRaiseGeneralProtectionFault0(pVCpu);
3267 }
3268
3269 IEMSELDESC DescCS;
3270 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3271 if (rcStrict != VINF_SUCCESS)
3272 {
3273 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3274 return rcStrict;
3275 }
3276
3277 /* Must be a code descriptor. */
3278 if (!DescCS.Legacy.Gen.u1DescType)
3279 {
3280 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3281 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3282 }
3283 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3284 {
3285 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3286 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3287 }
3288
3289 /* Privilege checks. */
3290 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3291 {
3292 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3293 {
3294 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3295 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3296 }
3297 }
3298 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3299 {
3300 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3301 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3302 }
3303 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3304 {
3305 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl));
3306 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3307 }
3308
3309 /* Present? */
3310 if (!DescCS.Legacy.Gen.u1Present)
3311 {
3312 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3313 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3314 }
3315
3316 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3317
3318 /*
3319 * Return to outer level?
3320 */
3321 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
3322 {
3323 uint16_t uNewSS;
3324 uint32_t uNewESP;
3325 if (enmEffOpSize == IEMMODE_32BIT)
3326 {
3327 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);
3328 if (rcStrict != VINF_SUCCESS)
3329 return rcStrict;
3330/** @todo We might be popping a 32-bit ESP from the IRET frame, but whether
3331 * 16-bit or 32-bit are being loaded into SP depends on the D/B
3332 * bit of the popped SS selector it turns out. */
3333 uNewESP = uFrame.pu32[0];
3334 uNewSS = (uint16_t)uFrame.pu32[1];
3335 }
3336 else
3337 {
3338 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);
3339 if (rcStrict != VINF_SUCCESS)
3340 return rcStrict;
3341 uNewESP = uFrame.pu16[0];
3342 uNewSS = uFrame.pu16[1];
3343 }
3344 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
3345 if (rcStrict != VINF_SUCCESS)
3346 return rcStrict;
3347 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3348
3349 /* Read the SS descriptor. */
3350 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3351 {
3352 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3353 return iemRaiseGeneralProtectionFault0(pVCpu);
3354 }
3355
3356 IEMSELDESC DescSS;
3357 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3358 if (rcStrict != VINF_SUCCESS)
3359 {
3360 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3361 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3362 return rcStrict;
3363 }
3364
3365 /* Privilege checks. */
3366 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3367 {
3368 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3369 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3370 }
3371 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3372 {
3373 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3374 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3375 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3376 }
3377
3378 /* Must be a writeable data segment descriptor. */
3379 if (!DescSS.Legacy.Gen.u1DescType)
3380 {
3381 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3382 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3383 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3384 }
3385 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3386 {
3387 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3388 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3389 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3390 }
3391
3392 /* Present? */
3393 if (!DescSS.Legacy.Gen.u1Present)
3394 {
3395 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3396 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3397 }
3398
3399 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3400
3401 /* Check EIP. */
3402 if (uNewEip > cbLimitCS)
3403 {
3404 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3405 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3406 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3407 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3408 }
3409
3410 /*
3411 * Commit the changes, marking CS and SS accessed first since
3412 * that may fail.
3413 */
3414 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3415 {
3416 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3417 if (rcStrict != VINF_SUCCESS)
3418 return rcStrict;
3419 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3420 }
3421 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3422 {
3423 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3424 if (rcStrict != VINF_SUCCESS)
3425 return rcStrict;
3426 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3427 }
3428
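/* Build the set of EFLAGS bits that IRET may restore at this privilege level:
   the arithmetic flags, TF, DF and NT are always restorable; RF, AC and ID only
   with a 32-bit operand size; IF only when CPL <= IOPL; and IOPL, VIF and VIP
   only at CPL 0.  386-class and older targets lack AC, ID, VIF and VIP, hence
   the final adjustment. */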
3429 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3430 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3431 if (enmEffOpSize != IEMMODE_16BIT)
3432 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3433 if (pVCpu->iem.s.uCpl == 0)
3434 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3435 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3436 fEFlagsMask |= X86_EFL_IF;
3437 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3438 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3439 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3440 fEFlagsNew &= ~fEFlagsMask;
3441 fEFlagsNew |= uNewFlags & fEFlagsMask;
3442#ifdef DBGFTRACE_ENABLED
3443 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3444 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3445 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3446#endif
3447
3448 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3449 pVCpu->cpum.GstCtx.rip = uNewEip;
3450 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3451 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3452 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3453 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3454 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3455 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3456 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3457
3458 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3459 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3460 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3461 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3462 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3463 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3464 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3465 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3466 else
3467 pVCpu->cpum.GstCtx.rsp = uNewESP;
3468
3469 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
3470 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3471 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3472 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3473 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3474
3475 /* Done! */
3476
3477 }
3478 /*
3479 * Return to the same level.
3480 */
3481 else
3482 {
3483 /* Check EIP. */
3484 if (uNewEip > cbLimitCS)
3485 {
3486 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3487 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3488 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3489 }
3490
3491 /*
3492 * Commit the changes, marking CS first since it may fail.
3493 */
3494 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3495 {
3496 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3497 if (rcStrict != VINF_SUCCESS)
3498 return rcStrict;
3499 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3500 }
3501
3502 X86EFLAGS NewEfl;
3503 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3504 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3505 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3506 if (enmEffOpSize != IEMMODE_16BIT)
3507 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3508 if (pVCpu->iem.s.uCpl == 0)
3509 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3510 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL)
3511 fEFlagsMask |= X86_EFL_IF;
3512 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3513 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3514 NewEfl.u &= ~fEFlagsMask;
3515 NewEfl.u |= fEFlagsMask & uNewFlags;
3516#ifdef DBGFTRACE_ENABLED
3517 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3518 pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3519 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3520#endif
3521
3522 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3523 pVCpu->cpum.GstCtx.rip = uNewEip;
3524 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3525 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3526 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3527 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3528 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3529 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3530 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3531 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3532 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3533 else
3534 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3535 /* Done! */
3536 }
3537
3538 /* Flush the prefetch buffer. */
3539 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if same ring? */
3540
3541/** @todo single stepping */
3542 return VINF_SUCCESS;
3543}
3544
3545
3546/**
3547 * Implements iret for long mode.
3548 *
3549 * @param enmEffOpSize The effective operand size.
3550 */
3551IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3552{
3553 NOREF(cbInstr);
3554
3555 /*
3556 * Nested task return is not supported in long mode.
3557 */
3558 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3559 {
3560 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3561 return iemRaiseGeneralProtectionFault0(pVCpu);
3562 }
3563
3564 /*
3565 * Normal return.
3566 *
3567 * Do the stack bits, but don't commit RSP before everything checks
3568 * out right.
3569 */
3570 VBOXSTRICTRC rcStrict;
3571 RTCPTRUNION uFrame;
3572 uint64_t uNewRip;
3573 uint16_t uNewCs;
3574 uint16_t uNewSs;
3575 uint32_t uNewFlags;
3576 uint64_t uNewRsp;
3577 if (enmEffOpSize == IEMMODE_64BIT)
3578 {
3579 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
3580 if (rcStrict != VINF_SUCCESS)
3581 return rcStrict;
3582 uNewRip = uFrame.pu64[0];
3583 uNewCs = (uint16_t)uFrame.pu64[1];
3584 uNewFlags = (uint32_t)uFrame.pu64[2];
3585 uNewRsp = uFrame.pu64[3];
3586 uNewSs = (uint16_t)uFrame.pu64[4];
3587 }
3588 else if (enmEffOpSize == IEMMODE_32BIT)
3589 {
3590 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
3591 if (rcStrict != VINF_SUCCESS)
3592 return rcStrict;
3593 uNewRip = uFrame.pu32[0];
3594 uNewCs = (uint16_t)uFrame.pu32[1];
3595 uNewFlags = uFrame.pu32[2];
3596 uNewRsp = uFrame.pu32[3];
3597 uNewSs = (uint16_t)uFrame.pu32[4];
3598 }
3599 else
3600 {
3601 Assert(enmEffOpSize == IEMMODE_16BIT);
3602 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
3603 if (rcStrict != VINF_SUCCESS)
3604 return rcStrict;
3605 uNewRip = uFrame.pu16[0];
3606 uNewCs = uFrame.pu16[1];
3607 uNewFlags = uFrame.pu16[2];
3608 uNewRsp = uFrame.pu16[3];
3609 uNewSs = uFrame.pu16[4];
3610 }
3611 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
3612 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3613 { /* extremely likely */ }
3614 else
3615 return rcStrict;
3616 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3617
3618 /*
3619 * Check stuff.
3620 */
3621 /* Read the CS descriptor. */
3622 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3623 {
3624 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3625 return iemRaiseGeneralProtectionFault0(pVCpu);
3626 }
3627
3628 IEMSELDESC DescCS;
3629 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3630 if (rcStrict != VINF_SUCCESS)
3631 {
3632 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3633 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636
3637 /* Must be a code descriptor. */
3638 if ( !DescCS.Legacy.Gen.u1DescType
3639 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3640 {
3641 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#x -> #GP\n",
3642 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3643 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3644 }
3645
3646 /* Privilege checks. */
3647 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3648 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3649 {
3650 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3651 {
3652 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3653 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3654 }
3655 }
3656 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3657 {
3658 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3659 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3660 }
3661 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl)
3662 {
3663 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl));
3664 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3665 }
3666
3667 /* Present? */
3668 if (!DescCS.Legacy.Gen.u1Present)
3669 {
3670 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3671 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3672 }
3673
3674 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3675
3676 /* Read the SS descriptor. */
3677 IEMSELDESC DescSS;
3678 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3679 {
3680 if ( !DescCS.Legacy.Gen.u1Long
3681 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3682 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3683 {
3684 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3685 return iemRaiseGeneralProtectionFault0(pVCpu);
3686 }
3687 /* Make sure SS is sensible, marked as accessed etc. */
3688 iemMemFakeStackSelDesc(&DescSS, (uNewSs & X86_SEL_RPL));
3689 }
3690 else
3691 {
3692 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3693 if (rcStrict != VINF_SUCCESS)
3694 {
3695 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3696 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3697 return rcStrict;
3698 }
3699 }
3700
3701 /* Privilege checks. */
3702 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3703 {
3704 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3705 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3706 }
3707
3708 uint32_t cbLimitSs;
3709 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3710 cbLimitSs = UINT32_MAX;
3711 else
3712 {
3713 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3714 {
3715 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3716 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3717 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3718 }
3719
3720 /* Must be a writeable data segment descriptor. */
3721 if (!DescSS.Legacy.Gen.u1DescType)
3722 {
3723 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3724 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3725 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3726 }
3727 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3728 {
3729 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3730 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3731 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3732 }
3733
3734 /* Present? */
3735 if (!DescSS.Legacy.Gen.u1Present)
3736 {
3737 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3738 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3739 }
3740 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3741 }
3742
3743 /* Check EIP. */
3744 if (DescCS.Legacy.Gen.u1Long)
3745 {
3746 if (!IEM_IS_CANONICAL(uNewRip))
3747 {
3748 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3749 uNewCs, uNewRip, uNewSs, uNewRsp));
3750 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3751 }
3752 }
3753 else
3754 {
3755 if (uNewRip > cbLimitCS)
3756 {
3757 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3758 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3759 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3760 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3761 }
3762 }
3763
3764 /*
3765 * Commit the changes, marking CS and SS accessed first since
3766 * that may fail.
3767 */
3768 /** @todo where exactly are these actually marked accessed by a real CPU? */
3769 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3770 {
3771 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3772 if (rcStrict != VINF_SUCCESS)
3773 return rcStrict;
3774 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3775 }
3776 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3777 {
3778 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3779 if (rcStrict != VINF_SUCCESS)
3780 return rcStrict;
3781 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3782 }
3783
3784 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3785 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3786 if (enmEffOpSize != IEMMODE_16BIT)
3787 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3788 if (pVCpu->iem.s.uCpl == 0)
3789 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3790 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3791 fEFlagsMask |= X86_EFL_IF;
3792 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3793 fEFlagsNew &= ~fEFlagsMask;
3794 fEFlagsNew |= uNewFlags & fEFlagsMask;
3795#ifdef DBGFTRACE_ENABLED
3796 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3797 pVCpu->iem.s.uCpl, uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3798#endif
3799
3800 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3801 pVCpu->cpum.GstCtx.rip = uNewRip;
3802 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3803 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3804 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3805 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3806 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3807 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3808 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
3809 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3810 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3811 else
3812 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3813 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3814 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3815 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3816 {
3817 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3818 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3819 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3820 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3821 Log2(("iretq new SS: NULL\n"));
3822 }
3823 else
3824 {
3825 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3826 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3827 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3828 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3829 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3830 }
3831
3832 if (pVCpu->iem.s.uCpl != uNewCpl)
3833 {
3834 pVCpu->iem.s.uCpl = uNewCpl;
3835 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3836 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3837 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3838 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3839 }
3840
3841 /* Flush the prefetch buffer. */
3842 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if the ring + mode doesn't change */
3843
3844/** @todo single stepping */
3845 return VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Implements iret.
3851 *
3852 * @param enmEffOpSize The effective operand size.
3853 */
3854IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3855{
3856 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
3857
3858#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3859 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3860 {
3861 /*
3862 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3863 * of this IRET instruction. We need to provide this information as part of some
3864 * VM-exits.
3865 *
3866 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3867 */
3868 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3869 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3870 else
3871 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3872
3873 /*
3874 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3875 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3876 */
3877 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3878 fBlockingNmi = false;
3879
3880 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3881 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3882 }
3883#endif
3884
3885 /*
3886 * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3887 * The NMI is still held pending (which I assume means blocking of further NMIs
3888 * is in effect).
3889 *
3890 * See AMD spec. 15.9 "Instruction Intercepts".
3891 * See AMD spec. 15.21.9 "NMI Support".
3892 */
3893 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3894 {
3895 Log(("iret: Guest intercept -> #VMEXIT\n"));
3896 IEM_SVM_UPDATE_NRIP(pVCpu);
3897 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3898 }
3899
3900 /*
3901 * Clear NMI blocking, if any, before causing any further exceptions.
3902 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3903 */
3904 if (fBlockingNmi)
3905 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3906
3907 /*
3908 * Call a mode specific worker.
3909 */
3910 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3911 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3912 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3913 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
3914 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3915 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3916}
3917
3918
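/**
 * Worker for LOADALL: loads a raw selector value into the given segment
 * register without touching the hidden base, limit and attribute fields
 * (LOADALL supplies the descriptor cache contents separately).
 */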
3919static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
3920{
3921 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3922
3923 pHid->Sel = uSel;
3924 pHid->ValidSel = uSel;
3925 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3926}
3927
3928
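/**
 * Worker for 286 LOADALL: decodes a 6-byte descriptor cache entry (24-bit
 * base, access byte, 16-bit limit) into the hidden parts of the given
 * segment register.
 */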
3929static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
3930{
3931 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3932
3933 /* The base is in the first three bytes. */
3934 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
3935 /* The attributes are in the fourth byte. */
3936 pHid->Attr.u = pbMem[3];
3937 /* The limit is in the last two bytes. */
3938 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
3939}
3940
3941
3942/**
3943 * Implements 286 LOADALL (286 CPUs only).
3944 */
3945IEM_CIMPL_DEF_0(iemCImpl_loadall286)
3946{
3947 NOREF(cbInstr);
3948
3949 /* Data is loaded from a buffer at 800h. No checks are done on the
3950 * validity of loaded state.
3951 *
3952 * LOADALL only loads the internal CPU state, it does not access any
3953 * GDT, LDT, or similar tables.
3954 */
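/* Layout of the 0x66 byte buffer as consumed below (offsets from 0x800):
   0x06 MSW, 0x16 TR, 0x18 FLAGS, 0x1A IP, 0x1C LDTR, 0x1E DS/SS/CS/ES,
   0x26 DI/SI/BP/SP/BX/DX/CX/AX, 0x36 ES/CS/SS/DS descriptor caches (6 bytes
   each), 0x4E GDTR and 0x5A IDTR. */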
3955
3956 if (pVCpu->iem.s.uCpl != 0)
3957 {
3958 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
3959 return iemRaiseGeneralProtectionFault0(pVCpu);
3960 }
3961
3962 uint8_t const *pbMem = NULL;
3963 uint16_t const *pa16Mem;
3964 uint8_t const *pa8Mem;
3965 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
3966 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
3967 if (rcStrict != VINF_SUCCESS)
3968 return rcStrict;
3969
3970 /* The MSW is at offset 0x06. */
3971 pa16Mem = (uint16_t const *)(pbMem + 0x06);
3972 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
3973 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3974 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3975 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
3976
3977 CPUMSetGuestCR0(pVCpu, uNewCr0);
3978 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
3979
3980 /* Inform PGM if mode changed. */
3981 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
3982 {
3983 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
3984 AssertRCReturn(rc, rc);
3985 /* ignore informational status codes */
3986 }
3987 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
3988 false /* fForce */);
3989
3990 /* TR selector is at offset 0x16. */
3991 pa16Mem = (uint16_t const *)(pbMem + 0x16);
3992 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0];
3993 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
3994 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3995
3996 /* Followed by FLAGS... */
3997 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
3998 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */
3999
4000 /* LDT is at offset 0x1C. */
4001 pa16Mem = (uint16_t const *)(pbMem + 0x1C);
4002 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0];
4003 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
4004 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4005
4006 /* Segment registers are at offset 0x1E. */
4007 pa16Mem = (uint16_t const *)(pbMem + 0x1E);
4008 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
4009 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
4010 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
4011 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
4012
4013 /* GPRs are at offset 0x26. */
4014 pa16Mem = (uint16_t const *)(pbMem + 0x26);
4015 pVCpu->cpum.GstCtx.di = pa16Mem[0];
4016 pVCpu->cpum.GstCtx.si = pa16Mem[1];
4017 pVCpu->cpum.GstCtx.bp = pa16Mem[2];
4018 pVCpu->cpum.GstCtx.sp = pa16Mem[3];
4019 pVCpu->cpum.GstCtx.bx = pa16Mem[4];
4020 pVCpu->cpum.GstCtx.dx = pa16Mem[5];
4021 pVCpu->cpum.GstCtx.cx = pa16Mem[6];
4022 pVCpu->cpum.GstCtx.ax = pa16Mem[7];
4023
4024 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
4025 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
4026 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
4027 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
4028 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
4029
4030 /* GDTR contents are at offset 0x4E, 6 bytes. */
4031 RTGCPHYS GCPtrBase;
4032 uint16_t cbLimit;
4033 pa8Mem = pbMem + 0x4E;
4034 /* NB: Fourth byte "should be zero"; we are ignoring it. */
4035 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4036 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4037 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
4038
4039 /* IDTR contents are at offset 0x5A, 6 bytes. */
4040 pa8Mem = pbMem + 0x5A;
4041 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
4042 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
4043 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
4044
4045 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
4046 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
4047 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
4048 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
4049 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
4050 Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
4051
4052 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
4053 if (rcStrict != VINF_SUCCESS)
4054 return rcStrict;
4055
4056 /* The CPL may change. It is taken from the "DPL fields of the SS and CS
4057 * descriptor caches" but there is no word as to what happens if those are
4058 * not identical (probably bad things).
4059 */
4060 pVCpu->iem.s.uCpl = pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl;
4061
4062 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
4063
4064 /* Flush the prefetch buffer. */
4065 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4066
4067/** @todo single stepping */
4068 return rcStrict;
4069}
4070
4071
4072/**
4073 * Implements SYSCALL (AMD and Intel64).
4074 */
4075IEM_CIMPL_DEF_0(iemCImpl_syscall)
4076{
4077 /** @todo hack, LOADALL should be decoded as such on a 286. */
4078 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
4079 return iemCImpl_loadall286(pVCpu, cbInstr);
4080
4081 /*
4082 * Check preconditions.
4083 *
4084 * Note that CPUs described in the documentation may load a few odd values
4085 * into CS and SS that we do not allow here. This has yet to be checked on real
4086 * hardware.
4087 */
4088 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4089 {
4090 Log(("syscall: Not enabled in EFER -> #UD\n"));
4091 return iemRaiseUndefinedOpcode(pVCpu);
4092 }
4093 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4094 {
4095 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4096 return iemRaiseGeneralProtectionFault0(pVCpu);
4097 }
4098 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4099 {
4100 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4101 return iemRaiseUndefinedOpcode(pVCpu);
4102 }
4103
4104 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4105
4106 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4107 /** @todo what about LDT selectors? Shouldn't matter, really. */
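/* The SYSCALL CS selector comes from STAR[47:32] with the RPL bits masked off;
   SS is architecturally defined as that selector + 8. */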
4108 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4109 uint16_t uNewSs = uNewCs + 8;
4110 if (uNewCs == 0 || uNewSs == 0)
4111 {
4112 /** @todo Neither Intel nor AMD document this check. */
4113 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4114 return iemRaiseGeneralProtectionFault0(pVCpu);
4115 }
4116
4117 /* Long mode and legacy mode differs. */
4118 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4119 {
4120 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx. msrCSTAR;
4121
4122 /* This test isn't in the docs, but I'm not trusting the guys writing
4123 the MSRs to have validated the values as canonical like they should. */
4124 if (!IEM_IS_CANONICAL(uNewRip))
4125 {
4126 /** @todo Intel claims this can't happen because IA32_LSTAR MSR can't be written with non-canonical address. */
4127 Log(("syscall: New RIP not canonical -> #UD\n"));
4128 return iemRaiseUndefinedOpcode(pVCpu);
4129 }
4130
4131 /*
4132 * Commit it.
4133 */
4134 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4135 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4136 pVCpu->cpum.GstCtx.rip = uNewRip;
4137
4138 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4139 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4140 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4141 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4142
4143 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4144 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4145 }
4146 else
4147 {
4148 /*
4149 * Commit it.
4150 */
4151 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4152 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4153 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4154 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4155
4156 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4157 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4158 }
4159 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4160 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4161 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4162 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4163 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4164
4165 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4166 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4167 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4168 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4169 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4170
4171 pVCpu->iem.s.uCpl = 0;
4172 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
4173
4174 /* Flush the prefetch buffer. */
4175 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4176
4177/** @todo single step */
4178 return VINF_SUCCESS;
4179}
4180
4181
4182/**
4183 * Implements SYSRET (AMD and Intel64).
4184 */
4185IEM_CIMPL_DEF_0(iemCImpl_sysret)
4186
4187{
4188 RT_NOREF_PV(cbInstr);
4189
4190 /*
4191 * Check preconditions.
4192 *
4193 * Note that CPUs described in the documentation may load a few odd values
4194 * into CS and SS that we do not allow here. This has yet to be checked on real
4195 * hardware.
4196 */
4197 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4198 {
4199 Log(("sysret: Not enabled in EFER -> #UD\n"));
4200 return iemRaiseUndefinedOpcode(pVCpu);
4201 }
4202 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4203 {
4204 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4205 return iemRaiseUndefinedOpcode(pVCpu);
4206 }
4207 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4208 {
4209 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4210 return iemRaiseGeneralProtectionFault0(pVCpu);
4211 }
4212 if (pVCpu->iem.s.uCpl != 0)
4213 {
4214 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4215 return iemRaiseGeneralProtectionFault0(pVCpu);
4216 }
4217
4218 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4219
4220 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
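/* SYSRET bases its selectors on STAR[63:48]: a 32-bit return uses that value as
   CS, a 64-bit return uses it + 16, and SS is always the base value + 8.  The
   RPL is forced to 3 when the registers are committed below. */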
4221 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4222 uint16_t uNewSs = uNewCs + 8;
4223 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4224 uNewCs += 16;
4225 if (uNewCs == 0 || uNewSs == 0)
4226 {
4227 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4228 return iemRaiseGeneralProtectionFault0(pVCpu);
4229 }
4230
4231 /*
4232 * Commit it.
4233 */
4234 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4235 {
4236 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
4237 {
4238 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4239 /* Note! We disregard intel manual regarding the RCX canonical
4240 check, ask intel+xen why AMD doesn't do it. */
4241 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4242 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4243 | (3 << X86DESCATTR_DPL_SHIFT);
4244 }
4245 else
4246 {
4247 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4248 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4249 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4250 | (3 << X86DESCATTR_DPL_SHIFT);
4251 }
4252 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4253 * what it really ignores. RF and VM are hinted at being zero, by AMD.
4254 * Intel says: RFLAGS := (R11 & 3C7FD7H) | 2; */
4255 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4256 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1;
4257 }
4258 else
4259 {
4260 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4261 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4262 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4263 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4264 | (3 << X86DESCATTR_DPL_SHIFT);
4265 }
4266 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4267 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4268 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4269 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4270 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4271
4272 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4273 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4274 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4275 /* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
4276 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4277 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4278 * on sysret. */
4279
4280 pVCpu->iem.s.uCpl = 3;
4281 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
4282
4283 /* Flush the prefetch buffer. */
4284 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4285
4286/** @todo single step */
4287 return VINF_SUCCESS;
4288}
4289
4290
4291/**
4292 * Implements SYSENTER (Intel, 32-bit AMD).
4293 */
4294IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4295{
4296 RT_NOREF(cbInstr);
4297
4298 /*
4299 * Check preconditions.
4300 *
4301 * Note that CPUs described in the documentation may load a few odd values
4302 * into CS and SS that we do not allow here. This has yet to be checked on real
4303 * hardware.
4304 */
4305 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4306 {
4307 Log(("sysenter: not supported -> #UD\n"));
4308 return iemRaiseUndefinedOpcode(pVCpu);
4309 }
4310 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4311 {
4312 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4313 return iemRaiseGeneralProtectionFault0(pVCpu);
4314 }
4315 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4316 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4317 {
4318 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4319 return iemRaiseUndefinedOpcode(pVCpu);
4320 }
4321 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4322 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4323 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4324 {
4325 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4326 return iemRaiseGeneralProtectionFault0(pVCpu);
4327 }
4328
4329 /* This test isn't in the docs, it's just a safeguard against missing
4330 canonical checks when writing the registers. */
4331 if (RT_LIKELY( !fIsLongMode
4332 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4333 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4334 { /* likely */ }
4335 else
4336 {
4337 Log(("sysenter: SYSENTER_EIP = %#RX64 and/or SYSENTER_ESP = %#RX64 not canonical -> #UD\n",
4338 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4339 return iemRaiseUndefinedOpcode(pVCpu);
4340 }
4341
4342/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4343
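/* SYSENTER takes its targets from the SYSENTER_EIP/ESP MSRs and builds flat
   ring-0 selectors from SYSENTER_CS: CS = SYSENTER_CS & ~3 and SS = CS + 8. */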
4344 /*
4345 * Update registers and commit.
4346 */
4347 if (fIsLongMode)
4348 {
4349 Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4350 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4351 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4352 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4353 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4354 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4355 }
4356 else
4357 {
4358 Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
4359 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4360 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4361 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4362 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4363 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4364 }
4365 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4366 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4367 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4368 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4369 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4370
4371 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4372 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4373 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4374 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4375 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4376 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4377 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4378
4379 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4380 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4381 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4382
4383 pVCpu->iem.s.uCpl = 0;
4384
4385 /* Flush the prefetch buffer. */
4386 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4387
4388/** @todo single stepping */
4389 return VINF_SUCCESS;
4390}
4391
4392
4393/**
4394 * Implements SYSEXIT (Intel, 32-bit AMD).
4395 *
4396 * @param enmEffOpSize The effective operand size.
4397 */
4398IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4399{
4400 RT_NOREF(cbInstr);
4401
4402 /*
4403 * Check preconditions.
4404 *
4405 * Note that CPUs described in the documentation may load a few odd values
4406 * into CS and SS that we do not allow here. This has yet to be checked on real
4407 * hardware.
4408 */
4409 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4410 {
4411 Log(("sysexit: not supported -> #UD\n"));
4412 return iemRaiseUndefinedOpcode(pVCpu);
4413 }
4414 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4415 {
4416 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4417 return iemRaiseGeneralProtectionFault0(pVCpu);
4418 }
4419 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4420 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4421 {
4422 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4423 return iemRaiseUndefinedOpcode(pVCpu);
4424 }
4425 if (pVCpu->iem.s.uCpl != 0)
4426 {
4427 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", pVCpu->iem.s.uCpl));
4428 return iemRaiseGeneralProtectionFault0(pVCpu);
4429 }
4430 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4431 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4432 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4433 {
4434 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4435 return iemRaiseGeneralProtectionFault0(pVCpu);
4436 }
4437
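/* SYSEXIT builds the ring-3 selectors from SYSENTER_CS: with a 64-bit operand
   size CS = SYSENTER_CS + 32 and SS = SYSENTER_CS + 40, otherwise CS = SYSENTER_CS + 16
   and SS = SYSENTER_CS + 24, each with RPL forced to 3; RIP and RSP are taken
   from RDX and RCX respectively. */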
4438 /*
4439 * Update registers and commit.
4440 */
4441 if (enmEffOpSize == IEMMODE_64BIT)
4442 {
4443 Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4444 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rcx));
4445 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4446 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4447 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4448 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4449 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4450 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4451 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4452 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4453 }
4454 else
4455 {
4456 Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4457 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4458 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4459 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4460 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4461 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4462 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4463 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4464 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4465 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4466 }
4467 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4468 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4469 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4470
4471 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4472 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4473 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4474 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4475 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4476 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4477
4478 pVCpu->iem.s.uCpl = 3;
4479/** @todo single stepping */
4480
4481 /* Flush the prefetch buffer. */
4482 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4483
4484 return VINF_SUCCESS;
4485}
4486
4487
4488/**
4489 * Completes a MOV SReg,XXX or POP SReg instruction.
4490 *
4491 * When not modifying SS or when we're already in an interrupt shadow we
4492 * can update RIP and finish the instruction the normal way.
4493 *
4494 * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
4495 * both TF and DBx events. The TF will be ignored while the DBx ones will
4496 * be delayed till the next instruction boundary. For more details see
4497 * @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching Stacks}.
4498 */
4499DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
4500{
4501 if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4502 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4503
4504 iemRegAddToRip(pVCpu, cbInstr);
4505 pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
4506 CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
4507
4508 return VINF_SUCCESS;
4509}
4510
4511
4512/**
4513 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4514 *
4515 * @param pVCpu The cross context virtual CPU structure of the calling
4516 * thread.
4517 * @param iSegReg The segment register number (valid).
4518 * @param uSel The new selector value.
4519 */
4520static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4521{
4522 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4523 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4524 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4525
4526 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4527
4528 /*
4529 * Real mode and V8086 mode are easy.
4530 */
4531 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4532 {
4533 *pSel = uSel;
4534 pHid->u64Base = (uint32_t)uSel << 4;
4535 pHid->ValidSel = uSel;
4536 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4537#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4538 /** @todo Does the CPU actually load limits and attributes in the
4539 * real/V8086 mode segment load case? It doesn't for CS in far
4540 * jumps... Affects unreal mode. */
4541 pHid->u32Limit = 0xffff;
4542 pHid->Attr.u = 0;
4543 pHid->Attr.n.u1Present = 1;
4544 pHid->Attr.n.u1DescType = 1;
4545 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4546 ? X86_SEL_TYPE_RW
4547 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4548#endif
4549 }
4550 /*
4551 * Protected mode.
4552 *
4553 * Check if it's a null segment selector value first, that's OK for DS, ES,
4554 * FS and GS. If not null, then we have to load and parse the descriptor.
4555 */
4556 else if (!(uSel & X86_SEL_MASK_OFF_RPL))
4557 {
4558 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4559 if (iSegReg == X86_SREG_SS)
4560 {
4561 /* In 64-bit kernel mode, the stack can be 0 because of the way
4562 interrupts are dispatched. AMD seems to have a slightly more
4563 relaxed relationship to SS.RPL than intel does. */
4564 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4565 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
4566 || pVCpu->iem.s.uCpl > 2
4567 || ( uSel != pVCpu->iem.s.uCpl
4568 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4569 {
4570 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4571 return iemRaiseGeneralProtectionFault0(pVCpu);
4572 }
4573 }
4574
4575 *pSel = uSel; /* Not RPL, remember :-) */
4576 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4577 if (iSegReg == X86_SREG_SS)
4578 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT;
4579 }
4580 else
4581 {
4582
4583 /* Fetch the descriptor. */
4584 IEMSELDESC Desc;
4585 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4586 if (rcStrict != VINF_SUCCESS)
4587 return rcStrict;
4588
4589 /* Check GPs first. */
4590 if (!Desc.Legacy.Gen.u1DescType)
4591 {
4592 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4593 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4594 }
4595 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4596 {
4597 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4598 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4599 {
4600 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4601 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4602 }
4603 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl)
4604 {
4605 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl));
4606 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4607 }
4608 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl)
4609 {
4610 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4611 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4612 }
4613 }
4614 else
4615 {
4616 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4617 {
4618 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4619 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4620 }
4621 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4622 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4623 {
4624#if 0 /* this is what intel says. */
4625 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4626 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4627 {
4628 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4629 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4630 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4631 }
4632#else /* this is what makes more sense. */
4633 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4634 {
4635 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4636 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4637 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4638 }
4639 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4640 {
4641 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4642 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl));
4643 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4644 }
4645#endif
4646 }
4647 }
4648
4649 /* Is it there? */
4650 if (!Desc.Legacy.Gen.u1Present)
4651 {
4652 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4653 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4654 }
4655
4656 /* The base and limit. */
4657 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4658 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4659
4660 /*
4661 * Ok, everything checked out fine. Now set the accessed bit before
4662 * committing the result into the registers.
4663 */
4664 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4665 {
4666 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4667 if (rcStrict != VINF_SUCCESS)
4668 return rcStrict;
4669 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4670 }
4671
4672 /* commit */
4673 *pSel = uSel;
4674 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4675 pHid->u32Limit = cbLimit;
4676 pHid->u64Base = u64Base;
4677 pHid->ValidSel = uSel;
4678 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4679
4680 /** @todo check if the hidden bits are loaded correctly for 64-bit
4681 * mode. */
4682 }
4683
4684 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4685 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4686 return VINF_SUCCESS;
4687}
4688
4689
4690/**
4691 * Implements 'mov SReg, r/m'.
4692 *
4693 * @param iSegReg The segment register number (valid).
4694 * @param uSel The new selector value.
4695 */
4696IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4697{
4698 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4699 if (rcStrict == VINF_SUCCESS)
4700 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4701 return rcStrict;
4702}
4703
4704
4705/**
4706 * Implements 'pop SReg'.
4707 *
4708 * @param iSegReg The segment register number (valid).
4709 * @param enmEffOpSize The effective operand size (valid).
4710 */
4711IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4712{
4713 VBOXSTRICTRC rcStrict;
4714
4715 /*
4716 * Read the selector off the stack and join paths with mov ss, reg.
4717 */
4718 RTUINT64U TmpRsp;
4719 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4720 switch (enmEffOpSize)
4721 {
4722 case IEMMODE_16BIT:
4723 {
4724 uint16_t uSel;
4725 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4726 if (rcStrict == VINF_SUCCESS)
4727 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4728 break;
4729 }
4730
4731 case IEMMODE_32BIT:
4732 {
4733 uint32_t u32Value;
4734 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4735 if (rcStrict == VINF_SUCCESS)
4736 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
4737 break;
4738 }
4739
4740 case IEMMODE_64BIT:
4741 {
4742 uint64_t u64Value;
4743 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4744 if (rcStrict == VINF_SUCCESS)
4745 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
4746 break;
4747 }
4748 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4749 }
4750
4751 /*
4752 * If the load succeeded, commit the stack change and finish the instruction.
4753 */
4754 if (rcStrict == VINF_SUCCESS)
4755 {
4756 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4757 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4758 }
4759
4760 return rcStrict;
4761}
4762
4763
4764/**
4765 * Implements lgs, lfs, les, lds & lss.
4766 */
4767IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
4768{
4769 /*
4770 * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
4771 */
4772 /** @todo verify and test that mov, pop and lXs perform the segment
4773 * register loading in exactly the same way. */
4774 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4775 if (rcStrict == VINF_SUCCESS)
4776 {
4777 switch (enmEffOpSize)
4778 {
4779 case IEMMODE_16BIT:
4780 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4781 break;
4782 case IEMMODE_32BIT:
4783 case IEMMODE_64BIT:
4784 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg;
4785 break;
4786 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4787 }
4788 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4789 }
4790 return rcStrict;
4791}
4792
4793
4794/**
4795 * Helper for VERR, VERW, LAR and LSL; loads the descriptor for the given selector.
4796 *
4797 * @retval VINF_SUCCESS on success.
4798 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4799 * @retval iemMemFetchSysU64 return value.
4800 *
4801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4802 * @param uSel The selector value.
4803 * @param fAllowSysDesc Whether system descriptors are OK or not.
4804 * @param pDesc Where to return the descriptor on success.
4805 */
4806static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4807{
4808 pDesc->Long.au64[0] = 0;
4809 pDesc->Long.au64[1] = 0;
4810
4811 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4812 return VINF_IEM_SELECTOR_NOT_OK;
4813
4814 /* Within the table limits? */
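 /* Note: OR-ing the selector with 7 (X86_SEL_RPL_LDT) gives the offset of the last byte of
    the 8-byte descriptor, so the checks below ensure the whole descriptor fits within the limit. */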
4815 RTGCPTR GCPtrBase;
4816 if (uSel & X86_SEL_LDT)
4817 {
4818 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4819 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4820 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4821 return VINF_IEM_SELECTOR_NOT_OK;
4822 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4823 }
4824 else
4825 {
4826 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4827 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4828 return VINF_IEM_SELECTOR_NOT_OK;
4829 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
4830 }
4831
4832 /* Fetch the descriptor. */
4833 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4834 if (rcStrict != VINF_SUCCESS)
4835 return rcStrict;
4836 if (!pDesc->Legacy.Gen.u1DescType)
4837 {
4838 if (!fAllowSysDesc)
4839 return VINF_IEM_SELECTOR_NOT_OK;
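 /* In long mode, system descriptors are 16 bytes wide, so the upper half is fetched as well. */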
4840 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4841 {
4842 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4843 if (rcStrict != VINF_SUCCESS)
4844 return rcStrict;
4845 }
4846
4847 }
4848
4849 return VINF_SUCCESS;
4850}
4851
4852
4853/**
4854 * Implements verr (fWrite = false) and verw (fWrite = true).
4855 */
4856IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4857{
4858 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4859
4860 /** @todo figure whether the accessed bit is set or not. */
4861
4862 bool fAccessible = true;
4863 IEMSELDESC Desc;
4864 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4865 if (rcStrict == VINF_SUCCESS)
4866 {
4867 /* Check the descriptor, order doesn't matter much here. */
4868 if ( !Desc.Legacy.Gen.u1DescType
4869 || !Desc.Legacy.Gen.u1Present)
4870 fAccessible = false;
4871 else
4872 {
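 /* VERW requires a writable data segment; VERR only rejects execute-only code segments
    (readable code segments and any data segment pass the read check). */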
4873 if ( fWrite
4874 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4875 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4876 fAccessible = false;
4877
4878 /** @todo testcase for the conforming behavior. */
4879 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4880 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4881 {
4882 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4883 fAccessible = false;
4884 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4885 fAccessible = false;
4886 }
4887 }
4888
4889 }
4890 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4891 fAccessible = false;
4892 else
4893 return rcStrict;
4894
4895 /* commit */
4896 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
4897
4898 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4899}
4900
4901
4902/**
4903 * Implements LAR and LSL with 64-bit operand size.
4904 *
4905 * @returns VINF_SUCCESS.
4906 * @param pu64Dst Pointer to the destination register.
4907 * @param uSel The selector to load details for.
4908 * @param fIsLar true = LAR, false = LSL.
4909 */
4910IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4911{
4912 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4913
4914 /** @todo figure whether the accessed bit is set or not. */
4915
4916 bool fDescOk = true;
4917 IEMSELDESC Desc;
4918 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
4919 if (rcStrict == VINF_SUCCESS)
4920 {
4921 /*
4922 * Check the descriptor type.
4923 */
4924 if (!Desc.Legacy.Gen.u1DescType)
4925 {
4926 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4927 {
4928 if (Desc.Long.Gen.u5Zeros)
4929 fDescOk = false;
4930 else
4931 switch (Desc.Long.Gen.u4Type)
4932 {
4933 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4934 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4935 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4936 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR; AMD and 32-bit mode do otherwise. */
4937 break;
4938 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4939 fDescOk = fIsLar;
4940 break;
4941 default:
4942 fDescOk = false;
4943 break;
4944 }
4945 }
4946 else
4947 {
4948 switch (Desc.Long.Gen.u4Type)
4949 {
4950 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4951 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4952 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4953 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4954 case X86_SEL_TYPE_SYS_LDT:
4955 break;
4956 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4957 case X86_SEL_TYPE_SYS_TASK_GATE:
4958 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4959 fDescOk = fIsLar;
4960 break;
4961 default:
4962 fDescOk = false;
4963 break;
4964 }
4965 }
4966 }
4967 if (fDescOk)
4968 {
4969 /*
4970 * Check the RPL/DPL/CPL interaction..
4971 */
4972 /** @todo testcase for the conforming behavior. */
4973 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4974 || !Desc.Legacy.Gen.u1DescType)
4975 {
4976 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4977 fDescOk = false;
4978 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)
4979 fDescOk = false;
4980 }
4981 }
4982
4983 if (fDescOk)
4984 {
4985 /*
4986 * All fine, start committing the result.
4987 */
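 /* LAR returns the attribute bytes (bits 8 thru 23 of the descriptor's second dword),
    LSL the granularity-adjusted segment limit. */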
4988 if (fIsLar)
4989 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4990 else
4991 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
4992 }
4993
4994 }
4995 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4996 fDescOk = false;
4997 else
4998 return rcStrict;
4999
5000 /* commit flags value and advance rip. */
5001 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5002 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5003}
5004
5005
5006/**
5007 * Implements LAR and LSL with 16-bit operand size.
5008 *
5009 * @returns VINF_SUCCESS.
5010 * @param pu16Dst Pointer to the destination register.
5011 * @param uSel The selector to load details for.
5012 * @param fIsLar true = LAR, false = LSL.
5013 */
5014IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5015{
5016 uint64_t u64TmpDst = *pu16Dst;
5017 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5018 *pu16Dst = u64TmpDst;
5019 return VINF_SUCCESS;
5020}
5021
5022
5023/**
5024 * Implements lgdt.
5025 *
5026 * @param iEffSeg The segment of the new gdtr contents.
5027 * @param GCPtrEffSrc The address of the new gdtr contents.
5028 * @param enmEffOpSize The effective operand size.
5029 */
5030IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5031{
5032 if (pVCpu->iem.s.uCpl != 0)
5033 return iemRaiseGeneralProtectionFault0(pVCpu);
5034 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5035
5036 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5037 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5038 {
5039 Log(("lgdt: Guest intercept -> VM-exit\n"));
5040 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5041 }
5042
5043 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5044 {
5045 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5046 IEM_SVM_UPDATE_NRIP(pVCpu);
5047 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5048 }
5049
5050 /*
5051 * Fetch the limit and base address.
5052 */
5053 uint16_t cbLimit;
5054 RTGCPTR GCPtrBase;
5055 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5056 if (rcStrict == VINF_SUCCESS)
5057 {
5058 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
5059 || X86_IS_CANONICAL(GCPtrBase))
5060 {
5061 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5062 if (rcStrict == VINF_SUCCESS)
5063 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5064 }
5065 else
5066 {
5067 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5068 return iemRaiseGeneralProtectionFault0(pVCpu);
5069 }
5070 }
5071 return rcStrict;
5072}
5073
5074
5075/**
5076 * Implements sgdt.
5077 *
5078 * @param iEffSeg The segment where to store the gdtr content.
5079 * @param GCPtrEffDst The address where to store the gdtr content.
5080 */
5081IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5082{
5083 /*
5084 * Join paths with sidt.
5085 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5086 * you really must know.
5087 */
5088 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5089 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5090 {
5091 Log(("sgdt: Guest intercept -> VM-exit\n"));
5092 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5093 }
5094
5095 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5096 {
5097 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5098 IEM_SVM_UPDATE_NRIP(pVCpu);
5099 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5100 }
5101
5102 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5103 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5104 if (rcStrict == VINF_SUCCESS)
5105 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5106 return rcStrict;
5107}
5108
5109
5110/**
5111 * Implements lidt.
5112 *
5113 * @param iEffSeg The segment of the new idtr contents.
5114 * @param GCPtrEffSrc The address of the new idtr contents.
5115 * @param enmEffOpSize The effective operand size.
5116 */
5117IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5118{
5119 if (pVCpu->iem.s.uCpl != 0)
5120 return iemRaiseGeneralProtectionFault0(pVCpu);
5121 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5122
5123 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5124 {
5125 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5126 IEM_SVM_UPDATE_NRIP(pVCpu);
5127 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5128 }
5129
5130 /*
5131 * Fetch the limit and base address.
5132 */
5133 uint16_t cbLimit;
5134 RTGCPTR GCPtrBase;
5135 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5136 if (rcStrict == VINF_SUCCESS)
5137 {
5138 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
5139 || X86_IS_CANONICAL(GCPtrBase))
5140 {
5141 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5142 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5143 }
5144 else
5145 {
5146 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5147 return iemRaiseGeneralProtectionFault0(pVCpu);
5148 }
5149 }
5150 return rcStrict;
5151}
5152
5153
5154/**
5155 * Implements sidt.
5156 *
5157 * @param iEffSeg The segment where to store the idtr content.
5158 * @param GCPtrEffDst The address where to store the idtr content.
5159 */
5160IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5161{
5162 /*
5163 * Join paths with sgdt.
5164 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5165 * you really must know.
5166 */
5167 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5168 {
5169 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5170 IEM_SVM_UPDATE_NRIP(pVCpu);
5171 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5172 }
5173
5174 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5175 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5176 if (rcStrict == VINF_SUCCESS)
5177 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5178 return rcStrict;
5179}
5180
5181
5182/**
5183 * Implements lldt.
5184 *
5185 * @param uNewLdt The new LDT selector value.
5186 */
5187IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5188{
5189 /*
5190 * Check preconditions.
5191 */
5192 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5193 {
5194 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5195 return iemRaiseUndefinedOpcode(pVCpu);
5196 }
5197 if (pVCpu->iem.s.uCpl != 0)
5198 {
5199 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl));
5200 return iemRaiseGeneralProtectionFault0(pVCpu);
5201 }
5202 /* Nested-guest VMX intercept. */
5203 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5204 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5205 {
5206 Log(("lldt: Guest intercept -> VM-exit\n"));
5207 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5208 }
5209 if (uNewLdt & X86_SEL_LDT)
5210 {
5211 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5212 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5213 }
5214
5215 /*
5216 * Now, loading a NULL selector is easy.
5217 */
5218 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5219 {
5220 /* Nested-guest SVM intercept. */
5221 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5222 {
5223 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5224 IEM_SVM_UPDATE_NRIP(pVCpu);
5225 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5226 }
5227
5228 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5229 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5230 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5231 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5232 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5233 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5234 {
5235 /* AMD-V seems to leave the base and limit alone. */
5236 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5237 }
5238 else
5239 {
5240 /* VT-x (Intel 3960x) seems to be doing the following. */
5241 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5242 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5243 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5244 }
5245
5246 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5247 }
5248
5249 /*
5250 * Read the descriptor.
5251 */
5252 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5253 IEMSELDESC Desc;
5254 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5255 if (rcStrict != VINF_SUCCESS)
5256 return rcStrict;
5257
5258 /* Check GPs first. */
5259 if (Desc.Legacy.Gen.u1DescType)
5260 {
5261 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5262 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5263 }
5264 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5265 {
5266 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5267 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5268 }
5269 uint64_t u64Base;
5270 if (!IEM_IS_LONG_MODE(pVCpu))
5271 u64Base = X86DESC_BASE(&Desc.Legacy);
5272 else
5273 {
5274 if (Desc.Long.Gen.u5Zeros)
5275 {
5276 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5277 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5278 }
5279
5280 u64Base = X86DESC64_BASE(&Desc.Long);
5281 if (!IEM_IS_CANONICAL(u64Base))
5282 {
5283 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5284 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5285 }
5286 }
5287
5288 /* NP */
5289 if (!Desc.Legacy.Gen.u1Present)
5290 {
5291 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5292 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5293 }
5294
5295 /* Nested-guest SVM intercept. */
5296 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5297 {
5298 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5299 IEM_SVM_UPDATE_NRIP(pVCpu);
5300 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5301 }
5302
5303 /*
5304 * It checks out alright, update the registers.
5305 */
5306/** @todo check if the actual value is loaded or if the RPL is dropped */
5307 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5308 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5309 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5310 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5311 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5312 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5313
5314 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5315}
5316
5317
5318/**
5319 * Implements sldt GReg
5320 *
5321 * @param iGReg The general register to store the LDTR value in.
5322 * @param enmEffOpSize The operand size.
5323 */
5324IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5325{
5326 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5327 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5328 {
5329 Log(("sldt: Guest intercept -> VM-exit\n"));
5330 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5331 }
5332
5333 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5334
5335 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5336 switch (enmEffOpSize)
5337 {
5338 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5339 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5340 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.ldtr.Sel; break;
5341 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5342 }
5343 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5344}
5345
5346
5347/**
5348 * Implements sldt mem.
5349 *
5350 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5351 * @param GCPtrEffDst Where to store the 16-bit LDTR value.
5352 */
5353IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5354{
5355 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
5356
5357 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5358 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5359 if (rcStrict == VINF_SUCCESS)
5360 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5361 return rcStrict;
5362}
5363
5364
5365/**
5366 * Implements ltr.
5367 *
5368 * @param uNewTr The new TSS selector value.
5369 */
5370IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5371{
5372 /*
5373 * Check preconditions.
5374 */
5375 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5376 {
5377 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5378 return iemRaiseUndefinedOpcode(pVCpu);
5379 }
5380 if (pVCpu->iem.s.uCpl != 0)
5381 {
5382 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl));
5383 return iemRaiseGeneralProtectionFault0(pVCpu);
5384 }
5385 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5386 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5387 {
5388 Log(("ltr: Guest intercept -> VM-exit\n"));
5389 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5390 }
5391 if (uNewTr & X86_SEL_LDT)
5392 {
5393 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5394 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5395 }
5396 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5397 {
5398 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5399 return iemRaiseGeneralProtectionFault0(pVCpu);
5400 }
5401 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5402 {
5403 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5404 IEM_SVM_UPDATE_NRIP(pVCpu);
5405 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5406 }
5407
5408 /*
5409 * Read the descriptor.
5410 */
5411 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5412 IEMSELDESC Desc;
5413 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5414 if (rcStrict != VINF_SUCCESS)
5415 return rcStrict;
5416
5417 /* Check GPs first. */
5418 if (Desc.Legacy.Gen.u1DescType)
5419 {
5420 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5421 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5422 }
5423 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5424 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5425 || IEM_IS_LONG_MODE(pVCpu)) )
5426 {
5427 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5428 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5429 }
5430 uint64_t u64Base;
5431 if (!IEM_IS_LONG_MODE(pVCpu))
5432 u64Base = X86DESC_BASE(&Desc.Legacy);
5433 else
5434 {
5435 if (Desc.Long.Gen.u5Zeros)
5436 {
5437 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5438 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5439 }
5440
5441 u64Base = X86DESC64_BASE(&Desc.Long);
5442 if (!IEM_IS_CANONICAL(u64Base))
5443 {
5444 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5445 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5446 }
5447 }
5448
5449 /* NP */
5450 if (!Desc.Legacy.Gen.u1Present)
5451 {
5452 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5453 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5454 }
5455
5456 /*
5457 * Set it busy.
5458 * Note! Intel says this should lock down the whole descriptor, but we'll
5459 * restrict ourselves to 32-bit for now due to lack of inline
5460 * assembly and such.
5461 */
5462 void *pvDesc;
5463 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
5464 IEM_ACCESS_DATA_RW, 0);
5465 if (rcStrict != VINF_SUCCESS)
5466 return rcStrict;
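 /* Descriptor bit 41 (type bit 1 in the access byte) is the TSS busy bit; the switch below
    adjusts the base pointer and bit index to the sub-dword alignment of the mapping. */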
5467 switch ((uintptr_t)pvDesc & 3)
5468 {
5469 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5470 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5471 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5472 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5473 }
5474 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
5475 if (rcStrict != VINF_SUCCESS)
5476 return rcStrict;
5477 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5478
5479 /*
5480 * It checks out alright, update the registers.
5481 */
5482/** @todo check if the actual value is loaded or if the RPL is dropped */
5483 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5484 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5485 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5486 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5487 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5488 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5489
5490 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5491}
5492
5493
5494/**
5495 * Implements str GReg
5496 *
5497 * @param iGReg The general register to store the TR value in.
5498 * @param enmEffOpSize The operand size.
5499 */
5500IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5501{
5502 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5503 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5504 {
5505 Log(("str_reg: Guest intercept -> VM-exit\n"));
5506 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5507 }
5508
5509 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5510
5511 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5512 switch (enmEffOpSize)
5513 {
5514 case IEMMODE_16BIT: *(uint16_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5515 case IEMMODE_32BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5516 case IEMMODE_64BIT: *(uint64_t *)iemGRegRef(pVCpu, iGReg) = pVCpu->cpum.GstCtx.tr.Sel; break;
5517 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5518 }
5519 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5520}
5521
5522
5523/**
5524 * Implements str mem.
5525 *
5526 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5527 * @param GCPtrEffDst Where to store the 16-bit TR value.
5528 */
5529IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5530{
5531 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5532 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5533 {
5534 Log(("str_mem: Guest intercept -> VM-exit\n"));
5535 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5536 }
5537
5538 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
5539
5540 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5541 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5542 if (rcStrict == VINF_SUCCESS)
5543 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5544 return rcStrict;
5545}
5546
5547
5548/**
5549 * Implements mov GReg,CRx.
5550 *
5551 * @param iGReg The general register to store the CRx value in.
5552 * @param iCrReg The CRx register to read (valid).
5553 */
5554IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5555{
5556 if (pVCpu->iem.s.uCpl != 0)
5557 return iemRaiseGeneralProtectionFault0(pVCpu);
5558 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5559
5560 if (IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5561 {
5562 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5563 IEM_SVM_UPDATE_NRIP(pVCpu);
5564 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5565 }
5566
5567 /* Read it. */
5568 uint64_t crX;
5569 switch (iCrReg)
5570 {
5571 case 0:
5572 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5573 crX = pVCpu->cpum.GstCtx.cr0;
5574 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5575 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5576 break;
5577 case 2:
5578 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5579 crX = pVCpu->cpum.GstCtx.cr2;
5580 break;
5581 case 3:
5582 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5583 crX = pVCpu->cpum.GstCtx.cr3;
5584 break;
5585 case 4:
5586 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5587 crX = pVCpu->cpum.GstCtx.cr4;
5588 break;
5589 case 8:
5590 {
5591 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5592#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5593 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5594 {
5595 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5596 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5597 return rcStrict;
5598
5599 /*
5600 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR are copied
5601 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5602 * are cleared.
5603 *
5604 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5605 */
5606 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5607 {
5608 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5609 crX = (uTpr >> 4) & 0xf;
5610 break;
5611 }
5612 }
5613#endif
5614#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5615 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5616 {
5617 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5618 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5619 {
5620 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5621 break;
5622 }
5623 }
5624#endif
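 /* CR8 reads return TPR bits 7:4 in bits 3:0, e.g. a TPR of 0x90 reads back as CR8=9. */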
5625 uint8_t uTpr;
5626 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5627 if (RT_SUCCESS(rc))
5628 crX = uTpr >> 4;
5629 else
5630 crX = 0;
5631 break;
5632 }
5633 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5634 }
5635
5636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5637 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5638 {
5639 switch (iCrReg)
5640 {
5641 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5642 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5643 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5644
5645 case 3:
5646 {
5647 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5648 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5649 return rcStrict;
5650 break;
5651 }
5652 }
5653 }
5654#endif
5655
5656 /* Store it. */
5657 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5658 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX;
5659 else
5660 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX;
5661
5662 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5663}
5664
5665
5666/**
5667 * Implements smsw GReg.
5668 *
5669 * @param iGReg The general register to store the MSW (CR0) value in.
5670 * @param enmEffOpSize The operand size.
5671 */
5672IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5673{
5674 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5675
5676#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5677 uint64_t u64MaskedCr0;
5678 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5679 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5680 else
5681 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5682 uint64_t const u64GuestCr0 = u64MaskedCr0;
5683#else
5684 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5685#endif
5686
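 /* The masks below model reserved MSW bits reading as ones on older CPUs: the 286 only
    implemented PE/MP/EM/TS (bits 3:0), while the 386 added ET (bit 4). */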
5687 switch (enmEffOpSize)
5688 {
5689 case IEMMODE_16BIT:
5690 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5691 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0;
5692 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5693 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xffe0;
5694 else
5695 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = (uint16_t)u64GuestCr0 | 0xfff0;
5696 break;
5697
5698 case IEMMODE_32BIT:
5699 *(uint32_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)u64GuestCr0;
5700 break;
5701
5702 case IEMMODE_64BIT:
5703 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = u64GuestCr0;
5704 break;
5705
5706 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5707 }
5708
5709 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5710}
5711
5712
5713/**
5714 * Implements smsw mem.
5715 *
5716 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5717 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5718 */
5719IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5720{
5721 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5722
5723#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5724 uint64_t u64MaskedCr0;
5725 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5726 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5727 else
5728 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5729 uint64_t const u64GuestCr0 = u64MaskedCr0;
5730#else
5731 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5732#endif
5733
5734 uint16_t u16Value;
5735 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5736 u16Value = (uint16_t)u64GuestCr0;
5737 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5738 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5739 else
5740 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5741
5742 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5743 if (rcStrict == VINF_SUCCESS)
5744 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5745 return rcStrict;
5746}
5747
5748
5749/**
5750 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
5751 */
5752#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
5753 do \
5754 { \
5755 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
5756 if (RT_SUCCESS(rcX)) \
5757 { /* likely */ } \
5758 else \
5759 { \
5760 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
5761 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
5762 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
5763 } \
5764 } while (0)
5765
5766
5767/**
5768 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5769 *
5770 * @param iCrReg The CRx register to write (valid).
5771 * @param uNewCrX The new value.
5772 * @param enmAccessCrX The instruction that caused the CrX load.
5773 * @param iGReg The general register in case of a 'mov CRx,GReg'
5774 * instruction.
5775 */
5776IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5777{
5778 VBOXSTRICTRC rcStrict;
5779 int rc;
5780#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5781 RT_NOREF2(iGReg, enmAccessCrX);
5782#endif
5783
5784 /*
5785 * Try store it.
5786 * Unfortunately, CPUM only does a tiny bit of the work.
5787 */
5788 switch (iCrReg)
5789 {
5790 case 0:
5791 {
5792 /*
5793 * Perform checks.
5794 */
5795 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5796
5797 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5798 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5799
5800 /* ET is hardcoded on 486 and later. */
5801 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5802 uNewCrX |= X86_CR0_ET;
5803 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5804 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5805 {
5806 uNewCrX &= fValid;
5807 uNewCrX |= X86_CR0_ET;
5808 }
5809 else
5810 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5811
5812 /* Check for reserved bits. */
5813 if (uNewCrX & ~(uint64_t)fValid)
5814 {
5815 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5816 return iemRaiseGeneralProtectionFault0(pVCpu);
5817 }
5818
5819 /* Check for invalid combinations. */
5820 if ( (uNewCrX & X86_CR0_PG)
5821 && !(uNewCrX & X86_CR0_PE) )
5822 {
5823 Log(("Trying to set CR0.PG without CR0.PE\n"));
5824 return iemRaiseGeneralProtectionFault0(pVCpu);
5825 }
5826
5827 if ( !(uNewCrX & X86_CR0_CD)
5828 && (uNewCrX & X86_CR0_NW) )
5829 {
5830 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5831 return iemRaiseGeneralProtectionFault0(pVCpu);
5832 }
5833
5834 if ( !(uNewCrX & X86_CR0_PG)
5835 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
5836 {
5837 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5838 return iemRaiseGeneralProtectionFault0(pVCpu);
5839 }
5840
5841 /* Long mode consistency checks. */
5842 if ( (uNewCrX & X86_CR0_PG)
5843 && !(uOldCrX & X86_CR0_PG)
5844 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5845 {
5846 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
5847 {
5848 Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5849 return iemRaiseGeneralProtectionFault0(pVCpu);
5850 }
5851 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
5852 {
5853 Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5854 return iemRaiseGeneralProtectionFault0(pVCpu);
5855 }
5856 }
5857
5858 /* Check for bits that must remain set or cleared in VMX operation,
5859 see Intel spec. 23.8 "Restrictions on VMX operation". */
5860 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5861 {
5862#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5863 uint64_t const uCr0Fixed0 = IEM_VMX_IS_NON_ROOT_MODE(pVCpu) ? iemVmxGetCr0Fixed0(pVCpu) : VMX_V_CR0_FIXED0;
5864#else
5865 uint64_t const uCr0Fixed0 = VMX_V_CR0_FIXED0;
5866#endif
5867 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
5868 {
5869 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
5870 return iemRaiseGeneralProtectionFault0(pVCpu);
5871 }
5872
5873 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5874 if (uNewCrX & ~uCr0Fixed1)
5875 {
5876 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
5877 return iemRaiseGeneralProtectionFault0(pVCpu);
5878 }
5879 }
5880
5881 /*
5882 * SVM nested-guest CR0 write intercepts.
5883 */
5884 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5885 {
5886 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5887 IEM_SVM_UPDATE_NRIP(pVCpu);
5888 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5889 }
5890 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5891 {
5892 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5893 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5894 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5895 {
5896 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5897 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5898 IEM_SVM_UPDATE_NRIP(pVCpu);
5899 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5900 }
5901 }
5902
5903 /*
5904 * Change EFER.LMA if entering or leaving long mode.
5905 */
5906 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
5907 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5908 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5909 {
5910 if (uNewCrX & X86_CR0_PG)
5911 NewEFER |= MSR_K6_EFER_LMA;
5912 else
5913 NewEFER &= ~MSR_K6_EFER_LMA;
5914
5915 CPUMSetGuestEFER(pVCpu, NewEFER);
5916 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
5917 }
5918
5919 /*
5920 * Inform PGM.
5921 */
5922 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
5923 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
5924 {
5925 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
5926 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
5927 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5928 { /* likely */ }
5929 else
5930 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
5931 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
5932 AssertRCReturn(rc, rc);
5933 /* ignore informational status codes */
5934 }
5935
5936 /*
5937 * Change CR0.
5938 */
5939 CPUMSetGuestCR0(pVCpu, uNewCrX);
5940 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
5941
5942 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
5943 false /* fForce */);
5944 break;
5945 }
5946
5947 /*
5948 * CR2 can be changed without any restrictions.
5949 */
5950 case 2:
5951 {
5952 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
5953 {
5954 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5955 IEM_SVM_UPDATE_NRIP(pVCpu);
5956 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
5957 }
5958 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
5959 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
5960 rcStrict = VINF_SUCCESS;
5961 break;
5962 }
5963
5964 /*
5965 * CR3 is relatively simple, although AMD and Intel have different
5966 * accounts of how setting reserved bits is handled. We take Intel's
5967 * word for the lower bits and AMD's for the high bits (63:52). The
5968 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
5969 * on this.
5970 */
5971 /** @todo Testcase: Setting reserved bits in CR3, especially before
5972 * enabling paging. */
5973 case 3:
5974 {
5975 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5976
5977 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
5978 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
5979 && (uNewCrX & RT_BIT_64(63)))
5980 {
5981 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
5982 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
5983 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
5984 * Paging-Structure Caches". */
5985 uNewCrX &= ~RT_BIT_64(63);
5986 }
5987
5988 /* Check / mask the value. */
5989#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5990 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
5991 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
5992 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
5993 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
5994#else
5995 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
5996#endif
5997 if (uNewCrX & fInvPhysMask)
5998 {
5999 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6000 * very vague in this area. As mentioned above, need testcase on real
6001 * hardware... Sigh. */
6002 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6003 return iemRaiseGeneralProtectionFault0(pVCpu);
6004 }
6005
6006 uint64_t fValid;
6007 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6008 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6009 {
6010 /** @todo Redundant? This value has already been validated above. */
6011 fValid = UINT64_C(0x000fffffffffffff);
6012 }
6013 else
6014 fValid = UINT64_C(0xffffffff);
6015 if (uNewCrX & ~fValid)
6016 {
6017 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6018 uNewCrX, uNewCrX & ~fValid));
6019 uNewCrX &= fValid;
6020 }
6021
6022 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6023 {
6024 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6025 IEM_SVM_UPDATE_NRIP(pVCpu);
6026 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6027 }
6028
6029 /* Inform PGM. */
6030 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6031 {
6032 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6033 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6034 { /* likely */ }
6035 else
6036 {
6037 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6038 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6039 }
6040 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6041 AssertRCReturn(rc, rc);
6042 /* ignore informational status codes */
6043 }
6044
6045 /* Make the change. */
6046 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6047 AssertRCSuccessReturn(rc, rc);
6048
6049 rcStrict = VINF_SUCCESS;
6050 break;
6051 }
6052
6053 /*
6054 * CR4 is a bit more tedious as there are bits which cannot be cleared
6055 * under some circumstances and such.
6056 */
6057 case 4:
6058 {
6059 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6060 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6061
6062 /* Reserved bits. */
6063 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6064 if (uNewCrX & ~(uint64_t)fValid)
6065 {
6066 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6067 return iemRaiseGeneralProtectionFault0(pVCpu);
6068 }
6069
6070 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6071 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6072
6073 /* PCIDE check. */
6074 if ( fPcide
6075 && ( !fLongMode
6076 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6077 {
6078 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6079 return iemRaiseGeneralProtectionFault0(pVCpu);
6080 }
6081
6082 /* PAE check. */
6083 if ( fLongMode
6084 && (uOldCrX & X86_CR4_PAE)
6085 && !(uNewCrX & X86_CR4_PAE))
6086 {
6087 Log(("Trying to clear CR4.PAE while long mode is active\n"));
6088 return iemRaiseGeneralProtectionFault0(pVCpu);
6089 }
6090
6091 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6092 {
6093 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6094 IEM_SVM_UPDATE_NRIP(pVCpu);
6095 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6096 }
6097
6098 /* Check for bits that must remain set or cleared in VMX operation,
6099 see Intel spec. 23.8 "Restrictions on VMX operation". */
6100 if (IEM_VMX_IS_ROOT_MODE(pVCpu))
6101 {
6102 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6103 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6104 {
6105 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6106 return iemRaiseGeneralProtectionFault0(pVCpu);
6107 }
6108
6109 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6110 if (uNewCrX & ~uCr4Fixed1)
6111 {
6112 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6113 return iemRaiseGeneralProtectionFault0(pVCpu);
6114 }
6115 }
6116
6117 /*
6118 * Notify PGM.
6119 */
6120 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6121 {
6122 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6123 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6124 { /* likely */ }
6125 else
6126 {
6127 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6128 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6129 }
6130 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6131 AssertRCReturn(rc, rc);
6132 /* ignore informational status codes */
6133 }
6134
6135 /*
6136 * Change it.
6137 */
6138 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6139 AssertRCSuccessReturn(rc, rc);
6140 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6141
6142 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6143 false /* fForce */);
6144 break;
6145 }
6146
6147 /*
6148 * CR8 maps to the APIC TPR.
6149 */
6150 case 8:
6151 {
6152 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6153 if (uNewCrX & ~(uint64_t)0xf)
6154 {
6155 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6156 return iemRaiseGeneralProtectionFault0(pVCpu);
6157 }
6158
6159#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6160 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6161 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6162 {
6163 /*
6164 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6165 * are copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6166 * cleared. Following this the processor performs TPR virtualization.
6167 *
6168 * However, we should not perform TPR virtualization immediately here but
6169 * after this instruction has completed.
6170 *
6171 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6172 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6173 */
6174 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6175 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6176 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6177 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6178 rcStrict = VINF_SUCCESS;
6179 break;
6180 }
6181#endif
6182
6183#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6184 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6185 {
6186 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6187 {
6188 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6189 IEM_SVM_UPDATE_NRIP(pVCpu);
6190 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6191 }
6192
6193 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6194 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6195 {
6196 rcStrict = VINF_SUCCESS;
6197 break;
6198 }
6199 }
6200#endif
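 /* CR8 writes go to TPR bits 7:4, e.g. 'mov cr8, 9' sets the TPR to 0x90. */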
6201 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6202 APICSetTpr(pVCpu, u8Tpr);
6203 rcStrict = VINF_SUCCESS;
6204 break;
6205 }
6206
6207 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6208 }
6209
6210 /*
6211 * Advance the RIP on success.
6212 */
6213 if (RT_SUCCESS(rcStrict))
6214 {
6215 if (rcStrict != VINF_SUCCESS)
6216 iemSetPassUpStatus(pVCpu, rcStrict);
6217 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6218 }
6219
6220 return rcStrict;
6221}
6222
6223
6224/**
6225 * Implements mov CRx,GReg.
6226 *
6227 * @param iCrReg The CRx register to write (valid).
6228 * @param iGReg The general register to load the CRx value from.
6229 */
6230IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6231{
6232 if (pVCpu->iem.s.uCpl != 0)
6233 return iemRaiseGeneralProtectionFault0(pVCpu);
6234 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6235
6236 /*
6237 * Read the new value from the source register and call common worker.
6238 */
6239 uint64_t uNewCrX;
6240 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6241 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6242 else
6243 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6244
6245#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6246 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6247 {
6248 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6249 switch (iCrReg)
6250 {
6251 case 0:
6252 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6253 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6254 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6255 }
6256 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6257 return rcStrict;
6258 }
6259#endif
6260
6261 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6262}
6263
6264
6265/**
6266 * Implements 'LMSW r/m16'
6267 *
6268 * @param u16NewMsw The new value.
6269 * @param GCPtrEffDst The guest-linear address of the source operand in case
6270 * of a memory operand. For register operand, pass
6271 * NIL_RTGCPTR.
6272 */
6273IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6274{
6275 if (pVCpu->iem.s.uCpl != 0)
6276 return iemRaiseGeneralProtectionFault0(pVCpu);
6277 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6278 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6279
6280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6281 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6282 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6283 {
6284 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6285 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6286 return rcStrict;
6287 }
6288#else
6289 RT_NOREF_PV(GCPtrEffDst);
6290#endif
6291
6292 /*
6293 * Compose the new CR0 value and call common worker.
6294 */
6295 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6296 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6297 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6298}
6299
6300
6301/**
6302 * Implements 'CLTS'.
6303 */
6304IEM_CIMPL_DEF_0(iemCImpl_clts)
6305{
6306 if (pVCpu->iem.s.uCpl != 0)
6307 return iemRaiseGeneralProtectionFault0(pVCpu);
6308
6309 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6310 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6311 uNewCr0 &= ~X86_CR0_TS;
6312
6313#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6314 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6315 {
6316 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6317 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6318 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6319 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6320 return rcStrict;
6321 }
6322#endif
6323
6324 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6325}
6326
6327
6328/**
6329 * Implements mov GReg,DRx.
6330 *
6331 * @param iGReg The general register to store the DRx value in.
6332 * @param iDrReg The DRx register to read (0-7).
6333 */
6334IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6335{
6336#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6337 /*
6338 * Check nested-guest VMX intercept.
6339 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6340 * over CPL and CR4.DE and even DR4/DR5 checks.
6341 *
6342 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6343 */
6344 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6345 {
6346 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6347 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6348 return rcStrict;
6349 }
6350#endif
6351
6352 /*
6353 * Check preconditions.
6354 */
6355 /* Raise GPs. */
6356 if (pVCpu->iem.s.uCpl != 0)
6357 return iemRaiseGeneralProtectionFault0(pVCpu);
6358 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6359 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR0);
6360
6361 if ( (iDrReg == 4 || iDrReg == 5)
6362 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE) )
6363 {
6364 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6365 return iemRaiseGeneralProtectionFault0(pVCpu);
6366 }
6367
6368 /* Raise #DB if general access detect is enabled. */
6369 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6370 {
6371 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6372 return iemRaiseDebugException(pVCpu);
6373 }
6374
6375 /*
6376 * Read the debug register and store it in the specified general register.
6377 */
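 /* With CR4.DE clear (the DE=1 case was rejected above), DR4 and DR5 alias DR6
    and DR7, and reads fix up the reserved bits: RA1 bits read as one, RAZ bits
    as zero. */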
6378 uint64_t drX;
6379 switch (iDrReg)
6380 {
6381 case 0:
6382 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6383 drX = pVCpu->cpum.GstCtx.dr[0];
6384 break;
6385 case 1:
6386 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6387 drX = pVCpu->cpum.GstCtx.dr[1];
6388 break;
6389 case 2:
6390 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6391 drX = pVCpu->cpum.GstCtx.dr[2];
6392 break;
6393 case 3:
6394 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6395 drX = pVCpu->cpum.GstCtx.dr[3];
6396 break;
6397 case 6:
6398 case 4:
6399 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6400 drX = pVCpu->cpum.GstCtx.dr[6];
6401 drX |= X86_DR6_RA1_MASK;
6402 drX &= ~X86_DR6_RAZ_MASK;
6403 break;
6404 case 7:
6405 case 5:
6406 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6407 drX = pVCpu->cpum.GstCtx.dr[7];
6408 drX |= X86_DR7_RA1_MASK;
6409 drX &= ~X86_DR7_RAZ_MASK;
6410 break;
6411 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6412 }
6413
6414 /** @todo SVM nested-guest intercept for DR8-DR15? */
6415 /*
6416 * Check for any SVM nested-guest intercepts for the DRx read.
6417 */
6418 if (IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6419 {
6420 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6421 IEM_SVM_UPDATE_NRIP(pVCpu);
6422 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6423 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6424 }
6425
6426 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6427 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX;
6428 else
6429 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX;
6430
6431 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6432}
6433
6434
6435/**
6436 * Implements mov DRx,GReg.
6437 *
6438 * @param iDrReg The DRx register to write (valid).
6439 * @param iGReg The general register to load the DRx value from.
6440 */
6441IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6442{
6443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6444 /*
6445 * Check nested-guest VMX intercept.
6446 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6447 * over CPL and CR4.DE and even DR4/DR5 checks.
6448 *
6449 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6450 */
6451 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6452 {
6453 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6454 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6455 return rcStrict;
6456 }
6457#endif
6458
6459 /*
6460 * Check preconditions.
6461 */
6462 if (pVCpu->iem.s.uCpl != 0)
6463 return iemRaiseGeneralProtectionFault0(pVCpu);
6464 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6465 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR4);
6466
6467 if (iDrReg == 4 || iDrReg == 5)
6468 {
6469 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6470 {
6471 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6472 return iemRaiseGeneralProtectionFault0(pVCpu);
6473 }
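 /* With CR4.DE clear, DR4 and DR5 are aliases of DR6 and DR7 respectively. */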
6474 iDrReg += 2;
6475 }
6476
6477 /* Raise #DB if general access detect is enabled. */
6478 /** @todo is the \#DB for DR7.GD raised before the \#GP for reserved high bits
6479 * in DR7/DR6? */
6480 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6481 {
6482 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6483 return iemRaiseDebugException(pVCpu);
6484 }
6485
6486 /*
6487 * Read the new value from the source register.
6488 */
6489 uint64_t uNewDrX;
6490 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6491 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6492 else
6493 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6494
6495 /*
6496 * Adjust it.
6497 */
6498 switch (iDrReg)
6499 {
6500 case 0:
6501 case 1:
6502 case 2:
6503 case 3:
6504 /* nothing to adjust */
6505 break;
6506
6507 case 6:
6508 if (uNewDrX & X86_DR6_MBZ_MASK)
6509 {
6510 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6511 return iemRaiseGeneralProtectionFault0(pVCpu);
6512 }
6513 uNewDrX |= X86_DR6_RA1_MASK;
6514 uNewDrX &= ~X86_DR6_RAZ_MASK;
6515 break;
6516
6517 case 7:
6518 if (uNewDrX & X86_DR7_MBZ_MASK)
6519 {
6520 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6521 return iemRaiseGeneralProtectionFault0(pVCpu);
6522 }
6523 uNewDrX |= X86_DR7_RA1_MASK;
6524 uNewDrX &= ~X86_DR7_RAZ_MASK;
6525 break;
6526
6527 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6528 }
6529
6530 /** @todo SVM nested-guest intercept for DR8-DR15? */
6531 /*
6532 * Check for any SVM nested-guest intercepts for the DRx write.
6533 */
6534 if (IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6535 {
6536 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6537 IEM_SVM_UPDATE_NRIP(pVCpu);
6538 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6539 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6540 }
6541
6542 /*
6543 * Do the actual setting.
6544 */
6545 if (iDrReg < 4)
6546 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6547 else if (iDrReg == 6)
6548 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6549
6550 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6551 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6552
6553 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6554}
6555
6556
6557/**
6558 * Implements mov GReg,TRx.
6559 *
6560 * @param iGReg The general register to store the
6561 * TRx value in.
6562 * @param iTrReg The TRx register to read (6/7).
6563 */
6564IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6565{
6566 /*
6567 * Check preconditions. NB: This instruction is 386/486 only.
6568 */
6569
6570 /* Raise GPs. */
6571 if (pVCpu->iem.s.uCpl != 0)
6572 return iemRaiseGeneralProtectionFault0(pVCpu);
6573 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6574
6575 if (iTrReg < 6 || iTrReg > 7)
6576 {
6577 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6578 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6579 return iemRaiseGeneralProtectionFault0(pVCpu);
6580 }
6581
6582 /*
6583 * Read the test register and store it in the specified general register.
6584 * This is currently a dummy implementation that only exists to satisfy
6585 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6586 * TR6/TR7 registers. Software which actually depends on the TR values
6587 * (different on 386/486) is exceedingly rare.
6588 */
6589 uint64_t trX;
6590 switch (iTrReg)
6591 {
6592 case 6:
6593 trX = 0; /* Currently a dummy. */
6594 break;
6595 case 7:
6596 trX = 0; /* Currently a dummy. */
6597 break;
6598 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
6599 }
6600
6601 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)trX;
6602
6603 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6604}
6605
6606
6607/**
6608 * Implements mov TRx,GReg.
6609 *
6610 * @param iTrReg The TRx register to write (valid).
6611 * @param iGReg The general register to load the TRx
6612 * value from.
6613 */
6614IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6615{
6616 /*
6617 * Check preconditions. NB: This instruction is 386/486 only.
6618 */
6619
6620 /* Raise GPs. */
6621 if (pVCpu->iem.s.uCpl != 0)
6622 return iemRaiseGeneralProtectionFault0(pVCpu);
6623 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6624
6625 if (iTrReg < 6 || iTrReg > 7)
6626 {
6627 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6628 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6629 return iemRaiseGeneralProtectionFault0(pVCpu);
6630 }
6631
6632 /*
6633 * Read the new value from the source register.
6634 */
6635 uint64_t uNewTrX;
6636 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6637 uNewTrX = iemGRegFetchU64(pVCpu, iGReg);
6638 else
6639 uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6640
6641 /*
6642 * Here we would do the actual setting if this weren't a dummy implementation,
6643 * which only exists to keep old debuggers like WDEB386 or OS/2 KDB from
6644 * crashing.
6645 */
6646 RT_NOREF(uNewTrX);
6647
6648 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6649}
6650
6651
6652/**
6653 * Implements 'INVLPG m'.
6654 *
6655 * @param GCPtrPage The effective address of the page to invalidate.
6656 * @remarks Updates the RIP.
6657 */
6658IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6659{
6660 /* ring-0 only. */
6661 if (pVCpu->iem.s.uCpl != 0)
6662 return iemRaiseGeneralProtectionFault0(pVCpu);
6663 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6664 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6665
6666#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6667 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6668 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6669 {
6670 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6671 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6672 }
6673#endif
6674
6675 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6676 {
6677 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6678 IEM_SVM_UPDATE_NRIP(pVCpu);
6679 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6680 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6681 }
6682
6683 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6684 if (rc == VINF_SUCCESS)
6685 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6686 if (rc == VINF_PGM_SYNC_CR3)
6687 {
6688 iemSetPassUpStatus(pVCpu, rc);
6689 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6690 }
6691
6692 AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6693 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6694 return rc;
6695}
6696
6697
6698/**
6699 * Implements INVPCID.
6700 *
6701 * @param iEffSeg The segment of the invpcid descriptor.
6702 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
6703 * @param uInvpcidType The invalidation type.
6704 * @remarks Updates the RIP.
6705 */
6706IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
6707{
6708 /*
6709 * Check preconditions.
6710 */
6711 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6712 return iemRaiseUndefinedOpcode(pVCpu);
6713
6714 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6715 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6716 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID))
6717 {
6718 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6719 return iemRaiseUndefinedOpcode(pVCpu);
6720 }
6721
6722 if (pVCpu->iem.s.uCpl != 0)
6723 {
6724 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6725 return iemRaiseGeneralProtectionFault0(pVCpu);
6726 }
6727
6728 if (IEM_IS_V86_MODE(pVCpu))
6729 {
6730 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6731 return iemRaiseGeneralProtectionFault0(pVCpu);
6732 }
6733
6734 /*
6735 * Check nested-guest intercept.
6736 *
6737 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6738 * both set. We have already checked the former earlier in this function.
6739 *
6740 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6741 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6742 */
6743 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6744 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6745 {
6746 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6747 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6748 }
6749
6750 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6751 {
6752 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
6753 return iemRaiseGeneralProtectionFault0(pVCpu);
6754 }
6755 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6756
6757 /*
6758 * Fetch the invpcid descriptor from guest memory.
6759 */
6760 RTUINT128U uDesc;
6761 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6762 if (rcStrict == VINF_SUCCESS)
6763 {
6764 /*
6765 * Validate the descriptor.
6766 */
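 /* The 128-bit descriptor layout: bits 11:0 hold the PCID, bits 63:12 are
    reserved and must be zero, and bits 127:64 hold the linear address (used
    only by the individual-address type). */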
6767 if (uDesc.s.Lo > 0xfff)
6768 {
6769 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6770 return iemRaiseGeneralProtectionFault0(pVCpu);
6771 }
6772
6773 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6774 uint8_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff);
6775 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6776 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
6777 switch (uInvpcidType)
6778 {
6779 case X86_INVPCID_TYPE_INDV_ADDR:
6780 {
6781 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6782 {
6783 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
6784 return iemRaiseGeneralProtectionFault0(pVCpu);
6785 }
6786 if ( !(uCr4 & X86_CR4_PCIDE)
6787 && uPcid != 0)
6788 {
6789 Log(("invpcid: invalid pcid %#x\n", uPcid));
6790 return iemRaiseGeneralProtectionFault0(pVCpu);
6791 }
6792
6793 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
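 /* Note: this is implemented conservatively by flushing all non-global
    mappings for the current CR3 rather than just the one address. */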
6794 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6795 break;
6796 }
6797
6798 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6799 {
6800 if ( !(uCr4 & X86_CR4_PCIDE)
6801 && uPcid != 0)
6802 {
6803 Log(("invpcid: invalid pcid %#x\n", uPcid));
6804 return iemRaiseGeneralProtectionFault0(pVCpu);
6805 }
6806 /* Invalidate all mappings associated with PCID except global translations. */
6807 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6808 break;
6809 }
6810
6811 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
6812 {
6813 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
6814 break;
6815 }
6816
6817 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
6818 {
6819 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6820 break;
6821 }
6822 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6823 }
6824 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6825 }
6826 return rcStrict;
6827}
6828
6829
6830/**
6831 * Implements INVD.
6832 */
6833IEM_CIMPL_DEF_0(iemCImpl_invd)
6834{
6835 if (pVCpu->iem.s.uCpl != 0)
6836 {
6837 Log(("invd: CPL != 0 -> #GP(0)\n"));
6838 return iemRaiseGeneralProtectionFault0(pVCpu);
6839 }
6840
6841 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6842 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
6843
6844 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
6845
6846 /* We currently take no action here. */
6847 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6848}
6849
6850
6851/**
6852 * Implements WBINVD.
6853 */
6854IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
6855{
6856 if (pVCpu->iem.s.uCpl != 0)
6857 {
6858 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
6859 return iemRaiseGeneralProtectionFault0(pVCpu);
6860 }
6861
6862 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6863 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
6864
6865 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
6866
6867 /* We currently take no action here. */
6868 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6869}
6870
6871
6872/** Opcode 0x0f 0xaa. */
6873IEM_CIMPL_DEF_0(iemCImpl_rsm)
6874{
6875 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
6876 NOREF(cbInstr);
6877 return iemRaiseUndefinedOpcode(pVCpu);
6878}
6879
6880
6881/**
6882 * Implements RDTSC.
6883 */
6884IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
6885{
6886 /*
6887 * Check preconditions.
6888 */
6889 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
6890 return iemRaiseUndefinedOpcode(pVCpu);
6891
6892 if (pVCpu->iem.s.uCpl != 0)
6893 {
6894 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6895 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6896 {
6897 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6898 return iemRaiseGeneralProtectionFault0(pVCpu);
6899 }
6900 }
6901
6902 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6903 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6904 {
6905 Log(("rdtsc: Guest intercept -> VM-exit\n"));
6906 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
6907 }
6908
6909 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
6910 {
6911 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
6912 IEM_SVM_UPDATE_NRIP(pVCpu);
6913 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6914 }
6915
6916 /*
6917 * Do the job.
6918 */
6919 uint64_t uTicks = TMCpuTickGet(pVCpu);
6920#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
6921 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6922#endif
6923 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
6924 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
6925 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
6926 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6927}
6928
6929
6930/**
6931 * Implements RDTSCP.
6932 */
6933IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
6934{
6935 /*
6936 * Check preconditions.
6937 */
6938 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
6939 return iemRaiseUndefinedOpcode(pVCpu);
6940
6941 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6942 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP))
6943 {
6944 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
6945 return iemRaiseUndefinedOpcode(pVCpu);
6946 }
6947
6948 if (pVCpu->iem.s.uCpl != 0)
6949 {
6950 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6951 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6952 {
6953 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6954 return iemRaiseGeneralProtectionFault0(pVCpu);
6955 }
6956 }
6957
6958 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6959 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6960 {
6961 Log(("rdtscp: Guest intercept -> VM-exit\n"));
6962 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
6963 }
6964 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
6965 {
6966 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
6967 IEM_SVM_UPDATE_NRIP(pVCpu);
6968 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6969 }
6970
6971 /*
6972 * Do the job.
6973 * Query the MSR first in case of trips to ring-3.
6974 */
6975 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
6976 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
6977 if (rcStrict == VINF_SUCCESS)
6978 {
6979 /* Low dword of the TSC_AUX msr only. */
6980 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
6981
6982 uint64_t uTicks = TMCpuTickGet(pVCpu);
6983#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
6984 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
6985#endif
6986 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
6987 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
6988 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
6989 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6990 }
6991 return rcStrict;
6992}
6993
6994
6995/**
6996 * Implements RDPMC.
6997 */
6998IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
6999{
7000 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7001
7002 if ( pVCpu->iem.s.uCpl != 0
7003 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7004 return iemRaiseGeneralProtectionFault0(pVCpu);
7005
7006 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7007 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7008 {
7009 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7010 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7011 }
7012
7013 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7014 {
7015 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7016 IEM_SVM_UPDATE_NRIP(pVCpu);
7017 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7018 }
7019
7020 /** @todo Emulate performance counters, for now just return 0. */
7021 pVCpu->cpum.GstCtx.rax = 0;
7022 pVCpu->cpum.GstCtx.rdx = 0;
7023 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7024 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7025 * ecx but see @bugref{3472}! */
7026
7027 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7028}
7029
7030
7031/**
7032 * Implements RDMSR.
7033 */
7034IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7035{
7036 /*
7037 * Check preconditions.
7038 */
7039 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7040 return iemRaiseUndefinedOpcode(pVCpu);
7041 if (pVCpu->iem.s.uCpl != 0)
7042 return iemRaiseGeneralProtectionFault0(pVCpu);
7043
7044 /*
7045 * Check nested-guest intercepts.
7046 */
7047#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7048 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7049 {
7050 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7051 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7052 }
7053#endif
7054
7055#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7056 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7057 {
7058 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */);
7059 if (rcStrict == VINF_SVM_VMEXIT)
7060 return VINF_SUCCESS;
7061 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7062 {
7063 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7064 return rcStrict;
7065 }
7066 }
7067#endif
7068
7069 /*
7070 * Do the job.
7071 */
7072 RTUINT64U uValue;
7073 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7075
7076 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7077 if (rcStrict == VINF_SUCCESS)
7078 {
7079 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7080 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7081 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7082
7083 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7084 }
7085
7086#ifndef IN_RING3
7087 /* Deferred to ring-3. */
7088 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7089 {
7090 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7091 return rcStrict;
7092 }
7093#endif
7094
7095 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7096 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7097 {
7098 pVCpu->iem.s.cLogRelRdMsr++;
7099 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7100 }
7101 else
7102 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7103 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7104 return iemRaiseGeneralProtectionFault0(pVCpu);
7105}
7106
7107
7108/**
7109 * Implements WRMSR.
7110 */
7111IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7112{
7113 /*
7114 * Check preconditions.
7115 */
7116 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7117 return iemRaiseUndefinedOpcode(pVCpu);
7118 if (pVCpu->iem.s.uCpl != 0)
7119 return iemRaiseGeneralProtectionFault0(pVCpu);
7120
7121 RTUINT64U uValue;
7122 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7123 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7124
7125 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7126
7127 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7128 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7129
7130 /*
7131 * Check nested-guest intercepts.
7132 */
7133#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7134 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7135 {
7136 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7137 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7138 }
7139#endif
7140
7141#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7142 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7143 {
7144 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */);
7145 if (rcStrict == VINF_SVM_VMEXIT)
7146 return VINF_SUCCESS;
7147 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7148 {
7149 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7150 return rcStrict;
7151 }
7152 }
7153#endif
7154
7155 /*
7156 * Do the job.
7157 */
7158 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7159 if (rcStrict == VINF_SUCCESS)
7160 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7161
7162#ifndef IN_RING3
7163 /* Deferred to ring-3. */
7164 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7165 {
7166 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7167 return rcStrict;
7168 }
7169#endif
7170
7171 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7172 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7173 {
7174 pVCpu->iem.s.cLogRelWrMsr++;
7175 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7176 }
7177 else
7178 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7179 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7180 return iemRaiseGeneralProtectionFault0(pVCpu);
7181}
7182
7183
7184/**
7185 * Implements 'IN eAX, port'.
7186 *
7187 * @param u16Port The source port.
7188 * @param fImm Whether the port was specified through an immediate operand
7189 * or the implicit DX register.
7190 * @param cbReg The register size.
7191 */
7192IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
7193{
7194 /*
7195 * CPL check
7196 */
7197 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7198 if (rcStrict != VINF_SUCCESS)
7199 return rcStrict;
7200
7201 /*
7202 * Check VMX nested-guest IO intercept.
7203 */
7204#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7205 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7206 {
7207 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, fImm, cbReg, cbInstr);
7208 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7209 return rcStrict;
7210 }
7211#else
7212 RT_NOREF(fImm);
7213#endif
7214
7215 /*
7216 * Check SVM nested-guest IO intercept.
7217 */
7218#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7219 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7220 {
7221 uint8_t cAddrSizeBits;
7222 switch (pVCpu->iem.s.enmEffAddrMode)
7223 {
7224 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7225 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7226 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7227 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7228 }
7229 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7230 false /* fRep */, false /* fStrIo */, cbInstr);
7231 if (rcStrict == VINF_SVM_VMEXIT)
7232 return VINF_SUCCESS;
7233 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7234 {
7235 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7236 VBOXSTRICTRC_VAL(rcStrict)));
7237 return rcStrict;
7238 }
7239 }
7240#endif
7241
7242 /*
7243 * Perform the I/O.
7244 */
7245 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7246 uint32_t u32Value = 0;
7247 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, cbReg);
7248 if (IOM_SUCCESS(rcStrict))
7249 {
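 /* Note: the 4-byte case deliberately writes the whole of RAX, since a 32-bit
    result zero-extends to 64 bits, whereas 8-bit and 16-bit results leave the
    upper register bits untouched. */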
7250 switch (cbReg)
7251 {
7252 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7253 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7254 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7255 default: AssertFailedReturn(VERR_IEM_IPE_3);
7256 }
7257
7258 pVCpu->iem.s.cPotentialExits++;
7259 if (rcStrict != VINF_SUCCESS)
7260 iemSetPassUpStatus(pVCpu, rcStrict);
7261 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7262
7263 /*
7264 * Check for I/O breakpoints.
7265 */
7266 /** @todo this should set an internal flag and be raised by
7267 * iemRegAddToRipAndFinishingClearingRF! */
7268 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7];
7269 if (RT_UNLIKELY( ( ( (uDr7 & X86_DR7_ENABLED_MASK)
7270 && X86_DR7_ANY_RW_IO(uDr7)
7271 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7272 || DBGFBpIsHwIoArmed(pVM))
7273 && rcStrict == VINF_SUCCESS))
7274 {
7275 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7276 rcStrict = DBGFBpCheckIo(pVM, pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg);
7277 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7278 rcStrict = iemRaiseDebugException(pVCpu);
7279 }
7280 }
7281
7282 return rcStrict;
7283}
7284
7285
7286/**
7287 * Implements 'IN eAX, DX'.
7288 *
7289 * @param cbReg The register size.
7290 */
7291IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
7292{
7293 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7294}
7295
7296
7297/**
7298 * Implements 'OUT port, eAX'.
7299 *
7300 * @param u16Port The destination port.
7301 * @param fImm Whether the port was specified through an immediate operand
7302 * or the implicit DX register.
7303 * @param cbReg The register size.
7304 */
7305IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, bool, fImm, uint8_t, cbReg)
7306{
7307 /*
7308 * CPL check
7309 */
7310 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7311 if (rcStrict != VINF_SUCCESS)
7312 return rcStrict;
7313
7314 /*
7315 * Check VMX nested-guest I/O intercept.
7316 */
7317#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7318 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7319 {
7320 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, fImm, cbReg, cbInstr);
7321 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7322 return rcStrict;
7323 }
7324#else
7325 RT_NOREF(fImm);
7326#endif
7327
7328 /*
7329 * Check SVM nested-guest I/O intercept.
7330 */
7331#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7332 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7333 {
7334 uint8_t cAddrSizeBits;
7335 switch (pVCpu->iem.s.enmEffAddrMode)
7336 {
7337 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7338 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7339 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7340 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7341 }
7342 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7343 false /* fRep */, false /* fStrIo */, cbInstr);
7344 if (rcStrict == VINF_SVM_VMEXIT)
7345 return VINF_SUCCESS;
7346 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7347 {
7348 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7349 VBOXSTRICTRC_VAL(rcStrict)));
7350 return rcStrict;
7351 }
7352 }
7353#endif
7354
7355 /*
7356 * Perform the I/O.
7357 */
7358 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7359 uint32_t u32Value;
7360 switch (cbReg)
7361 {
7362 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7363 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7364 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7365 default: AssertFailedReturn(VERR_IEM_IPE_4);
7366 }
7367 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, cbReg);
7368 if (IOM_SUCCESS(rcStrict))
7369 {
7370 pVCpu->iem.s.cPotentialExits++;
7371 if (rcStrict != VINF_SUCCESS)
7372 iemSetPassUpStatus(pVCpu, rcStrict);
7373 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7374
7375 /*
7376 * Check for I/O breakpoints.
7377 */
7378 /** @todo this should set an internal flag and be raised by
7379 * iemRegAddToRipAndFinishingClearingRF! */
7380 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7];
7381 if (RT_UNLIKELY( ( ( (uDr7 & X86_DR7_ENABLED_MASK)
7382 && X86_DR7_ANY_RW_IO(uDr7)
7383 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7384 || DBGFBpIsHwIoArmed(pVM))
7385 && rcStrict == VINF_SUCCESS))
7386 {
7387 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7388 rcStrict = DBGFBpCheckIo(pVM, pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg);
7389 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7390 rcStrict = iemRaiseDebugException(pVCpu);
7391 }
7392 }
7393 return rcStrict;
7394}
7395
7396
7397/**
7398 * Implements 'OUT DX, eAX'.
7399 *
7400 * @param cbReg The register size.
7401 */
7402IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
7403{
7404 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, false /* fImm */, cbReg);
7405}
7406
7407
7408/**
7409 * Implements 'CLI'.
7410 */
7411IEM_CIMPL_DEF_0(iemCImpl_cli)
7412{
7413 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7414#ifdef LOG_ENABLED
7415 uint32_t const fEflOld = fEfl;
7416#endif
7417
7418 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
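 /* CLI rules: in protected mode CPL <= IOPL clears IF, while CPL 3 with CR4.PVI
    set clears VIF instead; in V8086 mode IOPL 3 clears IF and IOPL < 3 with
    CR4.VME set clears VIF; any other combination raises #GP(0). Real mode
    always clears IF. */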
7419 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7420 {
7421 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7422 if (!(fEfl & X86_EFL_VM))
7423 {
7424 if (pVCpu->iem.s.uCpl <= uIopl)
7425 fEfl &= ~X86_EFL_IF;
7426 else if ( pVCpu->iem.s.uCpl == 3
7427 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7428 fEfl &= ~X86_EFL_VIF;
7429 else
7430 return iemRaiseGeneralProtectionFault0(pVCpu);
7431 }
7432 /* V8086 */
7433 else if (uIopl == 3)
7434 fEfl &= ~X86_EFL_IF;
7435 else if ( uIopl < 3
7436 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7437 fEfl &= ~X86_EFL_VIF;
7438 else
7439 return iemRaiseGeneralProtectionFault0(pVCpu);
7440 }
7441 /* real mode */
7442 else
7443 fEfl &= ~X86_EFL_IF;
7444
7445 /* Commit. */
7446 IEMMISC_SET_EFL(pVCpu, fEfl);
7447 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7448 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl));
7449 return rcStrict;
7450}
7451
7452
7453/**
7454 * Implements 'STI'.
7455 */
7456IEM_CIMPL_DEF_0(iemCImpl_sti)
7457{
7458 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7459 uint32_t const fEflOld = fEfl;
7460
7461 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
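 /* STI mirrors the CLI rules, except that the virtual-interrupt paths (CR4.PVI
    and CR4.VME) only set VIF when no virtual interrupt is pending, i.e. when
    EFLAGS.VIP is clear; otherwise #GP(0) is raised. */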
7462 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7463 {
7464 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7465 if (!(fEfl & X86_EFL_VM))
7466 {
7467 if (pVCpu->iem.s.uCpl <= uIopl)
7468 fEfl |= X86_EFL_IF;
7469 else if ( pVCpu->iem.s.uCpl == 3
7470 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7471 && !(fEfl & X86_EFL_VIP) )
7472 fEfl |= X86_EFL_VIF;
7473 else
7474 return iemRaiseGeneralProtectionFault0(pVCpu);
7475 }
7476 /* V8086 */
7477 else if (uIopl == 3)
7478 fEfl |= X86_EFL_IF;
7479 else if ( uIopl < 3
7480 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7481 && !(fEfl & X86_EFL_VIP) )
7482 fEfl |= X86_EFL_VIF;
7483 else
7484 return iemRaiseGeneralProtectionFault0(pVCpu);
7485 }
7486 /* real mode */
7487 else
7488 fEfl |= X86_EFL_IF;
7489
7490 /*
7491 * Commit.
7492 *
7493 * Note! Setting the shadow interrupt flag must be done after RIP updating.
7494 */
7495 IEMMISC_SET_EFL(pVCpu, fEfl);
7496 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7497 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7498 {
7499 /** @todo only set the shadow flag if it was clear before? */
7500 CPUMSetInInterruptShadowSti(&pVCpu->cpum.GstCtx);
7501 }
7502 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7503 return rcStrict;
7504}
7505
7506
7507/**
7508 * Implements 'HLT'.
7509 */
7510IEM_CIMPL_DEF_0(iemCImpl_hlt)
7511{
7512 if (pVCpu->iem.s.uCpl != 0)
7513 return iemRaiseGeneralProtectionFault0(pVCpu);
7514
7515 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7516 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7517 {
7518 Log2(("hlt: Guest intercept -> VM-exit\n"));
7519 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7520 }
7521
7522 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7523 {
7524 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7525 IEM_SVM_UPDATE_NRIP(pVCpu);
7526 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7527 }
7528
7529 /** @todo finish: This ASSUMES that iemRegAddToRipAndFinishingClearingRF won't
7530 * be returning any status codes relating to non-guest events being raised, as
7531 * we'll mess up the guest HALT otherwise. */
7532 VBOXSTRICTRC rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7533 if (rcStrict == VINF_SUCCESS)
7534 rcStrict = VINF_EM_HALT;
7535 return rcStrict;
7536}
7537
7538
7539/**
7540 * Implements 'MONITOR'.
7541 */
7542IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7543{
7544 /*
7545 * Permission checks.
7546 */
7547 if (pVCpu->iem.s.uCpl != 0)
7548 {
7549 Log2(("monitor: CPL != 0\n"));
7550 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7551 }
7552 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7553 {
7554 Log2(("monitor: Not in CPUID\n"));
7555 return iemRaiseUndefinedOpcode(pVCpu);
7556 }
7557
7558 /*
7559 * Check VMX guest-intercept.
7560 * This should be considered a fault-like VM-exit.
7561 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7562 */
7563 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7564 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7565 {
7566 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7567 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7568 }
7569
7570 /*
7571 * Gather the operands and validate them.
7572 */
7573 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7574 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7575 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7576/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7577 * \#GP first. */
7578 if (uEcx != 0)
7579 {
7580 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7581 return iemRaiseGeneralProtectionFault0(pVCpu);
7582 }
7583
7584 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7585 if (rcStrict != VINF_SUCCESS)
7586 return rcStrict;
7587
7588 RTGCPHYS GCPhysMem;
7589 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7590 if (rcStrict != VINF_SUCCESS)
7591 return rcStrict;
7592
7593#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7594 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7595 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7596 {
7597 /*
7598 * MONITOR does not access the memory, just monitors the address. However,
7599 * if the address falls in the APIC-access page, the address monitored must
7600 * instead be the corresponding address in the virtual-APIC page.
7601 *
7602 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7603 */
7604 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7605 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7606 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7607 return rcStrict;
7608 }
7609#endif
7610
7611 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7612 {
7613 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7614 IEM_SVM_UPDATE_NRIP(pVCpu);
7615 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7616 }
7617
7618 /*
7619 * Call EM to prepare the monitor/wait.
7620 */
7621 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7622 Assert(rcStrict == VINF_SUCCESS);
7623 if (rcStrict == VINF_SUCCESS)
7624 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7625 return rcStrict;
7626}
7627
7628
7629/**
7630 * Implements 'MWAIT'.
7631 */
7632IEM_CIMPL_DEF_0(iemCImpl_mwait)
7633{
7634 /*
7635 * Permission checks.
7636 */
7637 if (pVCpu->iem.s.uCpl != 0)
7638 {
7639 Log2(("mwait: CPL != 0\n"));
7640 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7641 * EFLAGS.VM then.) */
7642 return iemRaiseUndefinedOpcode(pVCpu);
7643 }
7644 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7645 {
7646 Log2(("mwait: Not in CPUID\n"));
7647 return iemRaiseUndefinedOpcode(pVCpu);
7648 }
7649
7650 /* Check VMX nested-guest intercept. */
7651 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7652 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7653 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7654
7655 /*
7656 * Gather the operands and validate them.
7657 */
7658 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7659 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7660 if (uEcx != 0)
7661 {
7662 /* Only supported extension is break on IRQ when IF=0. */
7663 if (uEcx > 1)
7664 {
7665 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7666 return iemRaiseGeneralProtectionFault0(pVCpu);
7667 }
7668 uint32_t fMWaitFeatures = 0;
7669 uint32_t uIgnore = 0;
7670 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7671 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7672 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7673 {
7674 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7675 return iemRaiseGeneralProtectionFault0(pVCpu);
7676 }
7677
7678#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7679 /*
7680 * If the interrupt-window exiting control is set, or a virtual interrupt is
7681 * pending delivery, and interrupts are disabled, the processor does not enter
7682 * its mwait state but rather passes control to the next instruction.
7683 *
7684 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7685 */
7686 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7687 && !pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7688 {
7689 if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7690 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7691 /** @todo finish: check this out after we move the interrupt-window stuff out of the
7692 * run loop and into the instruction finishing logic here. */
7693 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7694 }
7695#endif
7696 }
7697
7698 /*
7699 * Check SVM nested-guest mwait intercepts.
7700 */
7701 if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7702 && EMMonitorIsArmed(pVCpu))
7703 {
7704 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7705 IEM_SVM_UPDATE_NRIP(pVCpu);
7706 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7707 }
7708 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7709 {
7710 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7711 IEM_SVM_UPDATE_NRIP(pVCpu);
7712 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7713 }
7714
7715 /*
7716 * Call EM to prepare the monitor/wait.
7717 *
7718 * This will return VINF_EM_HALT. If the trap flag is set, we may
7719 * override it when executing iemRegAddToRipAndFinishingClearingRF ASSUMING
7720 * that will only return guest related events.
7721 */
7722 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7723
7724 /** @todo finish: This needs more thinking as we should suppress internal
7725 * debugger events here, or we'll bugger up the guest state even more than we
7726 * already do around VINF_EM_HALT. */
7727 VBOXSTRICTRC rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7728 if (rcStrict2 != VINF_SUCCESS)
7729 {
7730 Log2(("mwait: %Rrc (perform) -> %Rrc (finish)!\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2) ));
7731 rcStrict = rcStrict2;
7732 }
7733
7734 return rcStrict;
7735}
7736
7737
7738/**
7739 * Implements 'SWAPGS'.
7740 */
7741IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7742{
7743 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */
7744
7745 /*
7746 * Permission checks.
7747 */
7748 if (pVCpu->iem.s.uCpl != 0)
7749 {
7750 Log2(("swapgs: CPL != 0\n"));
7751 return iemRaiseUndefinedOpcode(pVCpu);
7752 }
7753
7754 /*
7755 * Do the job.
7756 */
7757 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7758 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7759 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7760 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7761
7762 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7763}
7764
7765
7766#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7767/**
7768 * Handles a CPUID call.
7769 */
7770static VBOXSTRICTRC iemCpuIdVBoxCall(PVMCPUCC pVCpu, uint32_t iFunction,
7771 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
7772{
7773 switch (iFunction)
7774 {
7775 case VBOX_CPUID_FN_ID:
7776 LogFlow(("iemCpuIdVBoxCall: VBOX_CPUID_FN_ID\n"));
7777 *pEax = VBOX_CPUID_RESP_ID_EAX;
7778 *pEbx = VBOX_CPUID_RESP_ID_EBX;
7779 *pEcx = VBOX_CPUID_RESP_ID_ECX;
7780 *pEdx = VBOX_CPUID_RESP_ID_EDX;
7781 break;
7782
7783 case VBOX_CPUID_FN_LOG:
7784 {
7785 CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX | CPUMCTX_EXTRN_RSI
7786 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7787
7788 /* Validate input. */
7789 uint32_t cchToLog = *pEdx;
7790 if (cchToLog <= _2M)
7791 {
7792 uint32_t const uLogPicker = *pEbx;
7793 if (uLogPicker <= 1)
7794 {
7795 /* Resolve the logger. */
7796 PRTLOGGER const pLogger = !uLogPicker
7797 ? RTLogDefaultInstanceEx(UINT32_MAX) : RTLogRelGetDefaultInstanceEx(UINT32_MAX);
7798 if (pLogger)
7799 {
7800 /* Copy over the data: */
7801 RTGCPTR GCPtrSrc = pVCpu->cpum.GstCtx.rsi;
7802 while (cchToLog > 0)
7803 {
7804 uint32_t cbToMap = GUEST_PAGE_SIZE - (GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
7805 if (cbToMap > cchToLog)
7806 cbToMap = cchToLog;
7807 /** @todo Extend iemMemMap to allow page-sized accesses and avoid 7
7808 * unnecessary calls & iterations per page. */
7809 if (cbToMap > 512)
7810 cbToMap = 512;
7811 void *pvSrc = NULL;
7812 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, cbToMap, UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
7813 if (rcStrict == VINF_SUCCESS)
7814 {
7815 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
7816 rcStrict = iemMemCommitAndUnmap(pVCpu, pvSrc, IEM_ACCESS_DATA_R);
7817 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7818 }
7819 else
7820 {
7821 Log(("iemCpuIdVBoxCall: %Rrc at %RGp LB %#x\n", VBOXSTRICTRC_VAL(rcStrict), GCPtrSrc, cbToMap));
7822 return rcStrict;
7823 }
7824
7825 /* Advance. */
7826 pVCpu->cpum.GstCtx.rsi = GCPtrSrc += cbToMap;
7827 *pEdx = cchToLog -= cbToMap;
7828 }
7829 *pEax = VINF_SUCCESS;
7830 }
7831 else
7832 *pEax = (uint32_t)VERR_NOT_FOUND;
7833 }
7834 else
7835 *pEax = (uint32_t)VERR_NOT_FOUND;
7836 }
7837 else
7838 *pEax = (uint32_t)VERR_TOO_MUCH_DATA;
7839 *pEdx = VBOX_CPUID_RESP_GEN_EDX;
7840 *pEcx = VBOX_CPUID_RESP_GEN_ECX;
7841 *pEbx = VBOX_CPUID_RESP_GEN_EBX;
7842 break;
7843 }
7844
7845 default:
7846 LogFlow(("iemCpuIdVBoxCall: Invalid function %#x (%#x, %#x)\n", iFunction, *pEbx, *pEdx));
7847 *pEax = (uint32_t)VERR_INVALID_FUNCTION;
7848 *pEbx = (uint32_t)VERR_INVALID_FUNCTION;
7849 *pEcx = (uint32_t)VERR_INVALID_FUNCTION;
7850 *pEdx = (uint32_t)VERR_INVALID_FUNCTION;
7851 break;
7852 }
7853 return VINF_SUCCESS;
7854}
7855#endif /* VBOX_WITHOUT_CPUID_HOST_CALL */
7856
7857/**
7858 * Implements 'CPUID'.
7859 */
7860IEM_CIMPL_DEF_0(iemCImpl_cpuid)
7861{
7862 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7863 {
7864 Log2(("cpuid: Guest intercept -> VM-exit\n"));
7865 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
7866 }
7867
7868 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
7869 {
7870 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
7871 IEM_SVM_UPDATE_NRIP(pVCpu);
7872 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7873 }
7874
7875
7876 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7877 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7878
7879#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7880 /*
7881 * CPUID host call backdoor.
7882 */
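 /* The backdoor is only taken when it has been explicitly enabled for the VM
    (fCpuIdHostCall) and EAX/ECX carry the fixed request signature; everything
    else falls through to the regular CPUID path below. */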
7883 if ( uEax == VBOX_CPUID_REQ_EAX_FIXED
7884 && (uEcx & VBOX_CPUID_REQ_ECX_FIXED_MASK) == VBOX_CPUID_REQ_ECX_FIXED
7885 && pVCpu->CTX_SUFF(pVM)->iem.s.fCpuIdHostCall)
7886 {
7887 VBOXSTRICTRC rcStrict = iemCpuIdVBoxCall(pVCpu, uEcx & VBOX_CPUID_REQ_ECX_FN_MASK,
7888 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx,
7889 &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
7890 if (rcStrict != VINF_SUCCESS)
7891 return rcStrict;
7892 }
7893 /*
7894 * Regular CPUID.
7895 */
7896 else
7897#endif
7898 CPUMGetGuestCpuId(pVCpu, uEax, uEcx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
7899 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
7900
7901 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
7902 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
7903 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7904 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
7905 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
7906
7907 pVCpu->iem.s.cPotentialExits++;
7908 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7909}
7910
7911
7912/**
7913 * Implements 'AAD'.
7914 *
7915 * @param bImm The immediate operand.
7916 */
7917IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
7918{
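 /* AAD: AL = AL + AH * imm (imm is 10 for the plain opcode) and AH is cleared;
    SF, ZF and PF reflect the new AL. */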
7919 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
7920 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
7921 pVCpu->cpum.GstCtx.ax = al;
7922 iemHlpUpdateArithEFlagsU8(pVCpu, al,
7923 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
7924 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
7925
7926 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7927}
7928
7929
7930/**
7931 * Implements 'AAM'.
7932 *
7933 * @param bImm The immediate operand. Cannot be 0.
7934 */
7935IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
7936{
7937 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
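 /* AAM: AH = AL / imm and AL = AL % imm (imm is 10 for the plain opcode);
    SF, ZF and PF reflect the new AL. */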
7938
7939 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
7940 uint8_t const al = (uint8_t)ax % bImm;
7941 uint8_t const ah = (uint8_t)ax / bImm;
7942 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
7943 iemHlpUpdateArithEFlagsU8(pVCpu, al,
7944 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
7945 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
7946
7947 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7948}
7949
7950
7951/**
7952 * Implements 'DAA'.
7953 */
7954IEM_CIMPL_DEF_0(iemCImpl_daa)
7955{
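 /* Decimal adjust after addition: add 6 to AL when the low nibble exceeds 9 or
    AF is set, then add 0x60 when the original AL exceeded 0x99 or CF was set,
    updating AF and CF accordingly. */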
7956 uint8_t const al = pVCpu->cpum.GstCtx.al;
7957 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
7958
7959 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7960 || (al & 0xf) >= 10)
7961 {
7962 pVCpu->cpum.GstCtx.al = al + 6;
7963 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7964 }
7965 else
7966 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
7967
7968 if (al >= 0x9a || fCarry)
7969 {
7970 pVCpu->cpum.GstCtx.al += 0x60;
7971 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7972 }
7973 else
7974 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
7975
7976 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
7977 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7978}
7979
7980
7981/**
7982 * Implements 'DAS'.
7983 */
7984IEM_CIMPL_DEF_0(iemCImpl_das)
7985{
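 /* Decimal adjust after subtraction: subtract 6 from AL when the low nibble
    exceeds 9 or AF is set, then subtract 0x60 when the original AL exceeded
    0x99 or CF was set, updating AF and CF accordingly. */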
7986 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
7987 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
7988
7989 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
7990 || (uInputAL & 0xf) >= 10)
7991 {
7992 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
7993 if (uInputAL < 6)
7994 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
7995 pVCpu->cpum.GstCtx.al = uInputAL - 6;
7996 }
7997 else
7998 {
7999 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8000 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8001 }
8002
8003 if (uInputAL >= 0x9a || fCarry)
8004 {
8005 pVCpu->cpum.GstCtx.al -= 0x60;
8006 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8007 }
8008
8009 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8010 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8011}
8012
8013
8014/**
8015 * Implements 'AAA'.
8016 */
8017IEM_CIMPL_DEF_0(iemCImpl_aaa)
8018{
8019 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8020 {
8021 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8022 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8023 {
8024 iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8025 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8026 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8027 }
8028 else
8029 {
8030 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8031 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8032 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8033 }
8034 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8035 }
8036 else
8037 {
8038 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8039 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8040 {
8041 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
8042 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8043 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8044 }
8045 else
8046 {
8047 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8048 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8049 }
8050 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8051 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8052 }
8053
8054 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8055}
8056
8057
8058/**
8059 * Implements 'AAS'.
8060 */
8061IEM_CIMPL_DEF_0(iemCImpl_aas)
8062{
8063 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8064 {
8065 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8066 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8067 {
8068 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.uBoth);
8069 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8070 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8071 }
8072 else
8073 {
8074 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8075 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8076 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8077 }
8078 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8079 }
8080 else
8081 {
8082 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8083 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8084 {
8085 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
8086 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8087 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8088 }
8089 else
8090 {
8091 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8092 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8093 }
8094 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8095 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8096 }
8097
8098 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8099}
8100
8101
8102/**
8103 * Implements the 16-bit version of 'BOUND'.
8104 *
8105 * @note We have separate 16-bit and 32-bit variants of this function due to
8106 * the decoder using unsigned parameters, whereas we want signed one to
8107 * do the job. This is significant for a recompiler.
8108 */
8109IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
8110{
8111 /*
8112 * Check if the index is inside the bounds, otherwise raise #BR.
8113 */
8114 if ( idxArray >= idxLowerBound
8115 && idxArray <= idxUpperBound)
8116 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8117 return iemRaiseBoundRangeExceeded(pVCpu);
8118}
8119
8120
8121/**
8122 * Implements the 32-bit version of 'BOUND'.
8123 */
8124IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8125{
8126 /*
8127 * Check if the index is inside the bounds, otherwise raise #BR.
8128 */
8129 if ( idxArray >= idxLowerBound
8130 && idxArray <= idxUpperBound)
8131 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8132 return iemRaiseBoundRangeExceeded(pVCpu);
8133}
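/*
 * Why signedness matters here (illustration only): with 16-bit operands an
 * array index of 0xfff8 is -8, which lies inside bounds of, say, [-16, 15]
 * and must not fault, whereas an unsigned comparison would wrongly raise
 * #BR.  Hence the int16_t/int32_t parameters noted above.
 */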
8134
8135
8136
8137/*
8138 * Instantiate the various string operation combinations.
8139 */
8140#define OP_SIZE 8
8141#define ADDR_SIZE 16
8142#include "IEMAllCImplStrInstr.cpp.h"
8143#define OP_SIZE 8
8144#define ADDR_SIZE 32
8145#include "IEMAllCImplStrInstr.cpp.h"
8146#define OP_SIZE 8
8147#define ADDR_SIZE 64
8148#include "IEMAllCImplStrInstr.cpp.h"
8149
8150#define OP_SIZE 16
8151#define ADDR_SIZE 16
8152#include "IEMAllCImplStrInstr.cpp.h"
8153#define OP_SIZE 16
8154#define ADDR_SIZE 32
8155#include "IEMAllCImplStrInstr.cpp.h"
8156#define OP_SIZE 16
8157#define ADDR_SIZE 64
8158#include "IEMAllCImplStrInstr.cpp.h"
8159
8160#define OP_SIZE 32
8161#define ADDR_SIZE 16
8162#include "IEMAllCImplStrInstr.cpp.h"
8163#define OP_SIZE 32
8164#define ADDR_SIZE 32
8165#include "IEMAllCImplStrInstr.cpp.h"
8166#define OP_SIZE 32
8167#define ADDR_SIZE 64
8168#include "IEMAllCImplStrInstr.cpp.h"
8169
8170#define OP_SIZE 64
8171#define ADDR_SIZE 32
8172#include "IEMAllCImplStrInstr.cpp.h"
8173#define OP_SIZE 64
8174#define ADDR_SIZE 64
8175#include "IEMAllCImplStrInstr.cpp.h"
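/*
 * The OP_SIZE=64 / ADDR_SIZE=16 combination is absent because 64-bit operand
 * size is only available in long mode, where the 67h prefix selects 32-bit
 * rather than 16-bit addressing, so that combination cannot be encoded.
 */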
8176
8177
8178/**
8179 * Implements 'XGETBV'.
8180 */
8181IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8182{
8183 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8184 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8185 {
8186 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8187 switch (uEcx)
8188 {
8189 case 0:
8190 break;
8191
8192 case 1: /** @todo Implement XCR1 support. */
8193 default:
8194 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8195 return iemRaiseGeneralProtectionFault0(pVCpu);
8196
8197 }
8198 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8199 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8200 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8201
8202 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8203 }
8204 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8205 return iemRaiseUndefinedOpcode(pVCpu);
8206}
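/*
 * Guest-side usage sketch (illustration only): with ECX=0 the instruction
 * returns XCR0 split across EDX:EAX, e.g. XCR0=0x7 (x87+SSE+AVX) comes back
 * as EAX=0x00000007, EDX=0x00000000:
 *
 *      xor     ecx, ecx
 *      xgetbv                  ; EDX:EAX = XCR0
 */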
8207
8208
8209/**
8210 * Implements 'XSETBV'.
8211 */
8212IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8213{
8214 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8215 {
8216 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8217 {
8218 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8219 IEM_SVM_UPDATE_NRIP(pVCpu);
8220 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8221 }
8222
8223 if (pVCpu->iem.s.uCpl == 0)
8224 {
8225 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8226
8227 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8228 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8229
8230 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8231 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8232 switch (uEcx)
8233 {
8234 case 0:
8235 {
8236 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8237 if (rc == VINF_SUCCESS)
8238 break;
8239 Assert(rc == VERR_CPUM_RAISE_GP_0);
8240 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8241 return iemRaiseGeneralProtectionFault0(pVCpu);
8242 }
8243
8244 case 1: /** @todo Implement XCR1 support. */
8245 default:
8246 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8247 return iemRaiseGeneralProtectionFault0(pVCpu);
8248
8249 }
8250
8251 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8252 }
8253
8254 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl));
8255 return iemRaiseGeneralProtectionFault0(pVCpu);
8256 }
8257 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8258 return iemRaiseUndefinedOpcode(pVCpu);
8259}
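/*
 * Validation of the new XCR0 value is left to CPUMSetGuestXcr0 above.
 * Architecturally, clearing the x87 bit, setting the AVX bit without the
 * SSE bit, or setting reserved/unsupported bits must all yield #GP(0),
 * which is what the VERR_CPUM_RAISE_GP_0 path models (reference summary,
 * not an exhaustive list).
 */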
8260
8261#ifndef RT_ARCH_ARM64
8262# ifdef IN_RING3
8263
8264/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8265struct IEMCIMPLCX16ARGS
8266{
8267 PRTUINT128U pu128Dst;
8268 PRTUINT128U pu128RaxRdx;
8269 PRTUINT128U pu128RbxRcx;
8270 uint32_t *pEFlags;
8271# ifdef VBOX_STRICT
8272 uint32_t cCalls;
8273# endif
8274};
8275
8276/**
8277 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8278 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8279 */
8280static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8281{
8282 RT_NOREF(pVM, pVCpu);
8283 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8284# ifdef VBOX_STRICT
8285 Assert(pArgs->cCalls == 0);
8286 pArgs->cCalls++;
8287# endif
8288
8289 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8290 return VINF_SUCCESS;
8291}
8292
8293# endif /* IN_RING3 */
8294
8295/**
8296 * Implements 'CMPXCHG16B' fallback using rendezvous.
8297 */
8298IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8299 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
8300{
8301# ifdef IN_RING3
8302 struct IEMCIMPLCX16ARGS Args;
8303 Args.pu128Dst = pu128Dst;
8304 Args.pu128RaxRdx = pu128RaxRdx;
8305 Args.pu128RbxRcx = pu128RbxRcx;
8306 Args.pEFlags = pEFlags;
8307# ifdef VBOX_STRICT
8308 Args.cCalls = 0;
8309# endif
8310 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8311 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8312 Assert(Args.cCalls == 1);
8313 if (rcStrict == VINF_SUCCESS)
8314 {
8315 /* Duplicated tail code. */
8316 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
8317 if (rcStrict == VINF_SUCCESS)
8318 {
8319 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8320 if (!(*pEFlags & X86_EFL_ZF))
8321 {
8322 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8323 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8324 }
8325 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8326 }
8327 }
8328 return rcStrict;
8329# else
8330 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
8331 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8332# endif
8333}
8334
8335#endif /* RT_ARCH_ARM64 */
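/*
 * Design note on the rendezvous fallback above: when no native 16-byte
 * compare-exchange is available, atomicity is emulated by bringing all EMTs
 * to a synchronization point so that no other virtual CPU can race the
 * 16-byte update while the once-only callback performs the compare+swap.
 * Slow, but it is only used on this fallback path.
 */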
8336
8337/**
8338 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8339 *
8340 * This is implemented in C because it triggers a load-like behaviour without
8341 * actually reading anything. Since that's not so common, it's implemented
8342 * here.
8343 *
8344 * @param iEffSeg The effective segment.
8345 * @param GCPtrEff The address of the image.
8346 */
8347IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8348{
8349 /*
8350 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8351 */
8352 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8353 if (rcStrict == VINF_SUCCESS)
8354 {
8355 RTGCPHYS GCPhysMem;
8356 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8357 if (rcStrict == VINF_SUCCESS)
8358 {
8359#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8360 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8361 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8362 {
8363 /*
8364 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8365 * that contains the address. However, if the address falls in the APIC-access
8366 * page, the address flushed must instead be the corresponding address in the
8367 * virtual-APIC page.
8368 *
8369 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8370 */
8371 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8372 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8373 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8374 return rcStrict;
8375 }
8376#endif
8377 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8378 }
8379 }
8380
8381 return rcStrict;
8382}
8383
8384
8385/**
8386 * Implements 'FINIT' and 'FNINIT'.
8387 *
8388 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
8389 * not.
8390 */
8391IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8392{
8393 /*
8394 * Exceptions.
8395 */
8396 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8397 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8398 return iemRaiseDeviceNotAvailable(pVCpu);
8399
8400 iemFpuActualizeStateForChange(pVCpu);
8401 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8402
8403 /* FINIT: Raise #MF on pending exception(s): */
8404 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8405 return iemRaiseMathFault(pVCpu);
8406
8407 /*
8408 * Reset the state.
8409 */
8410 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8411
8412 /* Rotate the stack to account for changed TOS. */
8413 iemFpuRotateStackSetTop(&pXState->x87, 0);
8414
8415 pXState->x87.FCW = 0x37f;
8416 pXState->x87.FSW = 0;
8417 pXState->x87.FTW = 0x00; /* 0 - empty. */
8418 /** @todo Intel says the instruction and data pointers are not cleared on
8419 * 387; presume that the 8087 and 287 don't clear them either. */
8420 /** @todo test this stuff. */
8421 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8422 {
8423 pXState->x87.FPUDP = 0;
8424 pXState->x87.DS = 0; //??
8425 pXState->x87.Rsrvd2 = 0;
8426 pXState->x87.FPUIP = 0;
8427 pXState->x87.CS = 0; //??
8428 pXState->x87.Rsrvd1 = 0;
8429 }
8430 pXState->x87.FOP = 0;
8431
8432 iemHlpUsedFpu(pVCpu);
8433 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8434}
8435
8436
8437/**
8438 * Implements 'FXSAVE'.
8439 *
8440 * @param iEffSeg The effective segment.
8441 * @param GCPtrEff The address of the image.
8442 * @param enmEffOpSize The operand size (only REX.W really matters).
8443 */
8444IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8445{
8446 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8447
8448 /*
8449 * Raise exceptions.
8450 */
8451 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8452 return iemRaiseDeviceNotAvailable(pVCpu);
8453
8454 /*
8455 * Access the memory.
8456 */
8457 void *pvMem512;
8458 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8459 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8460 if (rcStrict != VINF_SUCCESS)
8461 return rcStrict;
8462 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8463 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8464
8465 /*
8466 * Store the registers.
8467 */
8468 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
8469 * specific whether MXCSR and XMM0-XMM7 are saved. */
8470
8471 /* common for all formats */
8472 pDst->FCW = pSrc->FCW;
8473 pDst->FSW = pSrc->FSW;
8474 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8475 pDst->FOP = pSrc->FOP;
8476 pDst->MXCSR = pSrc->MXCSR;
8477 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8478 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8479 {
8480 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8481 * them for now... */
8482 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8483 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8484 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8485 pDst->aRegs[i].au32[3] = 0;
8486 }
8487
8488 /* FPU IP, CS, DP and DS. */
8489 pDst->FPUIP = pSrc->FPUIP;
8490 pDst->CS = pSrc->CS;
8491 pDst->FPUDP = pSrc->FPUDP;
8492 pDst->DS = pSrc->DS;
8493 if (enmEffOpSize == IEMMODE_64BIT)
8494 {
8495 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8496 pDst->Rsrvd1 = pSrc->Rsrvd1;
8497 pDst->Rsrvd2 = pSrc->Rsrvd2;
8498 }
8499 else
8500 {
8501 pDst->Rsrvd1 = 0;
8502 pDst->Rsrvd2 = 0;
8503 }
8504
8505 /* XMM registers. */
8506 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8507 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8508 || pVCpu->iem.s.uCpl != 0)
8509 {
8510 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8511 for (uint32_t i = 0; i < cXmmRegs; i++)
8512 pDst->aXMM[i] = pSrc->aXMM[i];
8513 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8514 * right? */
8515 }
8516
8517 /*
8518 * Commit the memory.
8519 */
8520 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8521 if (rcStrict != VINF_SUCCESS)
8522 return rcStrict;
8523
8524 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8525}
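/*
 * For reference, the 512-byte FXSAVE image written above is laid out as
 * follows (abbreviated from the Intel SDM / AMD APM):
 *      0x000  FCW, FSW, abridged FTW, FOP
 *      0x008  FPU IP + CS (or 64-bit instruction pointer with REX.W)
 *      0x010  FPU DP + DS (or 64-bit data pointer with REX.W)
 *      0x018  MXCSR, MXCSR_MASK
 *      0x020  ST0..ST7 / MM0..MM7, 16 bytes per register
 *      0x0a0  XMM registers, 16 bytes each (all 16 only for the REX.W form
 *             as modelled above)
 * The remainder is reserved/available and is not touched here.
 */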
8526
8527
8528/**
8529 * Implements 'FXRSTOR'.
8530 *
8531 * @param iEffSeg The effective segment register for @a GCPtrEff.
8532 * @param GCPtrEff The address of the image.
8533 * @param enmEffOpSize The operand size (only REX.W really matters).
8534 */
8535IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8536{
8537 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8538
8539 /*
8540 * Raise exceptions.
8541 */
8542 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8543 return iemRaiseDeviceNotAvailable(pVCpu);
8544
8545 /*
8546 * Access the memory.
8547 */
8548 void *pvMem512;
8549 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8550 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8551 if (rcStrict != VINF_SUCCESS)
8552 return rcStrict;
8553 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8554 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8555
8556 /*
8557 * Check the state for stuff which will #GP(0).
8558 */
8559 uint32_t const fMXCSR = pSrc->MXCSR;
8560 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8561 if (fMXCSR & ~fMXCSR_MASK)
8562 {
8563 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8564 return iemRaiseGeneralProtectionFault0(pVCpu);
8565 }
8566
8567 /*
8568 * Load the registers.
8569 */
8570 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0 it's implementation
8571 * specific whether MXCSR and XMM0-XMM7 are restored. */
8572
8573 /* common for all formats */
8574 pDst->FCW = pSrc->FCW;
8575 pDst->FSW = pSrc->FSW;
8576 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8577 pDst->FOP = pSrc->FOP;
8578 pDst->MXCSR = fMXCSR;
8579 /* (MXCSR_MASK is read-only) */
8580 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8581 {
8582 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8583 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8584 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8585 pDst->aRegs[i].au32[3] = 0;
8586 }
8587
8588 /* FPU IP, CS, DP and DS. */
8589 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8590 {
8591 pDst->FPUIP = pSrc->FPUIP;
8592 pDst->CS = pSrc->CS;
8593 pDst->Rsrvd1 = pSrc->Rsrvd1;
8594 pDst->FPUDP = pSrc->FPUDP;
8595 pDst->DS = pSrc->DS;
8596 pDst->Rsrvd2 = pSrc->Rsrvd2;
8597 }
8598 else
8599 {
8600 pDst->FPUIP = pSrc->FPUIP;
8601 pDst->CS = pSrc->CS;
8602 pDst->Rsrvd1 = 0;
8603 pDst->FPUDP = pSrc->FPUDP;
8604 pDst->DS = pSrc->DS;
8605 pDst->Rsrvd2 = 0;
8606 }
8607
8608 /* XMM registers. */
8609 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8610 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT
8611 || pVCpu->iem.s.uCpl != 0)
8612 {
8613 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8614 for (uint32_t i = 0; i < cXmmRegs; i++)
8615 pDst->aXMM[i] = pSrc->aXMM[i];
8616 }
8617
8618 if (pDst->FSW & X86_FSW_ES)
8619 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8620 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8621
8622 /*
8623 * Commit the memory.
8624 */
8625 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8626 if (rcStrict != VINF_SUCCESS)
8627 return rcStrict;
8628
8629 iemHlpUsedFpu(pVCpu);
8630 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8631}
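/*
 * Note on the EFER.FFXSR checks in FXSAVE/FXRSTOR above: AMD's fast
 * FXSAVE/FXRSTOR feature lets 64-bit ring-0 code skip the XMM register
 * array to speed up context switches.  As modelled here only the XMM loop
 * is skipped; MXCSR is still stored and loaded unconditionally.
 */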
8632
8633
8634/**
8635 * Implements 'XSAVE'.
8636 *
8637 * @param iEffSeg The effective segment.
8638 * @param GCPtrEff The address of the image.
8639 * @param enmEffOpSize The operand size (only REX.W really matters).
8640 */
8641IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8642{
8643 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8644
8645 /*
8646 * Raise exceptions.
8647 */
8648 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8649 return iemRaiseUndefinedOpcode(pVCpu);
8650 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8651 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8652 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8653 {
8654 Log(("xsave: Not enabled for nested-guest execution -> #UD\n"));
8655 return iemRaiseUndefinedOpcode(pVCpu);
8656 }
8657 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8658 return iemRaiseDeviceNotAvailable(pVCpu);
8659
8660 /*
8661 * Calc the requested mask.
8662 */
8663 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8664 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8665 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8666
8667/** @todo figure out the exact protocol for the memory access. Currently we
8668 * just need this crap to work halfways to make it possible to test
8669 * AVX instructions. */
8670/** @todo figure out the XINUSE and XMODIFIED */
8671
8672 /*
8673 * Access the x87 memory state.
8674 */
8675 /* The x87+SSE state. */
8676 void *pvMem512;
8677 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8678 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8679 if (rcStrict != VINF_SUCCESS)
8680 return rcStrict;
8681 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8682 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8683
8684 /* The header. */
8685 PX86XSAVEHDR pHdr;
8686 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
8687 if (rcStrict != VINF_SUCCESS)
8688 return rcStrict;
8689
8690 /*
8691 * Store the X87 state.
8692 */
8693 if (fReqComponents & XSAVE_C_X87)
8694 {
8695 /* common for all formats */
8696 pDst->FCW = pSrc->FCW;
8697 pDst->FSW = pSrc->FSW;
8698 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8699 pDst->FOP = pSrc->FOP;
8700 pDst->FPUIP = pSrc->FPUIP;
8701 pDst->CS = pSrc->CS;
8702 pDst->FPUDP = pSrc->FPUDP;
8703 pDst->DS = pSrc->DS;
8704 if (enmEffOpSize == IEMMODE_64BIT)
8705 {
8706 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8707 pDst->Rsrvd1 = pSrc->Rsrvd1;
8708 pDst->Rsrvd2 = pSrc->Rsrvd2;
8709 }
8710 else
8711 {
8712 pDst->Rsrvd1 = 0;
8713 pDst->Rsrvd2 = 0;
8714 }
8715 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8716 {
8717 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8718 * them for now... */
8719 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8720 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8721 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8722 pDst->aRegs[i].au32[3] = 0;
8723 }
8724
8725 }
8726
8727 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8728 {
8729 pDst->MXCSR = pSrc->MXCSR;
8730 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8731 }
8732
8733 if (fReqComponents & XSAVE_C_SSE)
8734 {
8735 /* XMM registers. */
8736 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8737 for (uint32_t i = 0; i < cXmmRegs; i++)
8738 pDst->aXMM[i] = pSrc->aXMM[i];
8739 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8740 * right? */
8741 }
8742
8743 /* Commit the x87 state bits. (probably wrong) */
8744 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8745 if (rcStrict != VINF_SUCCESS)
8746 return rcStrict;
8747
8748 /*
8749 * Store AVX state.
8750 */
8751 if (fReqComponents & XSAVE_C_YMM)
8752 {
8753 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8754 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8755 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8756 PX86XSAVEYMMHI pCompDst;
8757 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8758 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
8759 if (rcStrict != VINF_SUCCESS)
8760 return rcStrict;
8761
8762 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8763 for (uint32_t i = 0; i < cXmmRegs; i++)
8764 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8765
8766 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
8767 if (rcStrict != VINF_SUCCESS)
8768 return rcStrict;
8769 }
8770
8771 /*
8772 * Update the header.
8773 */
8774 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8775 | (fReqComponents & fXInUse);
8776
8777 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
8778 if (rcStrict != VINF_SUCCESS)
8779 return rcStrict;
8780
8781 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8782}
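/*
 * The XSTATE_BV update above follows the architectural formula
 *      XSTATE_BV = (XSTATE_BV & ~RFBM) | (RFBM & XINUSE)
 * with RFBM = (EDX:EAX) & XCR0 and XINUSE approximated by XCR0 here (see
 * the todo above).  Illustrative example: RFBM=0x7 and XINUSE=0x3 set the
 * x87 and SSE bits in the header and clear the AVX bit, telling a later
 * XRSTOR to initialize YMM instead of loading it from memory.
 */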
8783
8784
8785/**
8786 * Implements 'XRSTOR'.
8787 *
8788 * @param iEffSeg The effective segment.
8789 * @param GCPtrEff The address of the image.
8790 * @param enmEffOpSize The operand size (only REX.W really matters).
8791 */
8792IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8793{
8794 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8795
8796 /*
8797 * Raise exceptions.
8798 */
8799 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8800 return iemRaiseUndefinedOpcode(pVCpu);
8801 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8802 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8803 && !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS))
8804 {
8805 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
8806 return iemRaiseUndefinedOpcode(pVCpu);
8807 }
8808 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8809 return iemRaiseDeviceNotAvailable(pVCpu);
8810 if (GCPtrEff & 63)
8811 {
8812 /** @todo CPU/VM detection possible! \#AC might not be signalled for
8813 * all/any misalignment sizes; Intel says it's an implementation detail. */
8814 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8815 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8816 && pVCpu->iem.s.uCpl == 3)
8817 return iemRaiseAlignmentCheckException(pVCpu);
8818 return iemRaiseGeneralProtectionFault0(pVCpu);
8819 }
8820
8821/** @todo figure out the exact protocol for the memory access. Currently we
8822 * just need this crap to work halfways to make it possible to test
8823 * AVX instructions. */
8824/** @todo figure out the XINUSE and XMODIFIED */
8825
8826 /*
8827 * Access the x87 memory state.
8828 */
8829 /* The x87+SSE state. */
8830 void *pvMem512;
8831 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8832 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8833 if (rcStrict != VINF_SUCCESS)
8834 return rcStrict;
8835 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8836 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8837
8838 /*
8839 * Calc the requested mask
8840 */
8841 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
8842 PCX86XSAVEHDR pHdrSrc;
8843 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512,
8844 IEM_ACCESS_DATA_R, 0 /* checked above */);
8845 if (rcStrict != VINF_SUCCESS)
8846 return rcStrict;
8847
8848 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8849 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8850 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8851 uint64_t const fRstorMask = pHdrSrc->bmXState;
8852 uint64_t const fCompMask = pHdrSrc->bmXComp;
8853
8854 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8855
8856 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8857
8858 /* We won't need this any longer. */
8859 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
8860 if (rcStrict != VINF_SUCCESS)
8861 return rcStrict;
8862
8863 /*
8864 * Store the X87 state.
8865 */
8866 if (fReqComponents & XSAVE_C_X87)
8867 {
8868 if (fRstorMask & XSAVE_C_X87)
8869 {
8870 pDst->FCW = pSrc->FCW;
8871 pDst->FSW = pSrc->FSW;
8872 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8873 pDst->FOP = pSrc->FOP;
8874 pDst->FPUIP = pSrc->FPUIP;
8875 pDst->CS = pSrc->CS;
8876 pDst->FPUDP = pSrc->FPUDP;
8877 pDst->DS = pSrc->DS;
8878 if (enmEffOpSize == IEMMODE_64BIT)
8879 {
8880 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8881 pDst->Rsrvd1 = pSrc->Rsrvd1;
8882 pDst->Rsrvd2 = pSrc->Rsrvd2;
8883 }
8884 else
8885 {
8886 pDst->Rsrvd1 = 0;
8887 pDst->Rsrvd2 = 0;
8888 }
8889 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8890 {
8891 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8892 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8893 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8894 pDst->aRegs[i].au32[3] = 0;
8895 }
8896 if (pDst->FSW & X86_FSW_ES)
8897 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8898 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8899 }
8900 else
8901 {
8902 pDst->FCW = 0x37f;
8903 pDst->FSW = 0;
8904 pDst->FTW = 0x00; /* 0 - empty. */
8905 pDst->FPUDP = 0;
8906 pDst->DS = 0; //??
8907 pDst->Rsrvd2= 0;
8908 pDst->FPUIP = 0;
8909 pDst->CS = 0; //??
8910 pDst->Rsrvd1= 0;
8911 pDst->FOP = 0;
8912 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8913 {
8914 pDst->aRegs[i].au32[0] = 0;
8915 pDst->aRegs[i].au32[1] = 0;
8916 pDst->aRegs[i].au32[2] = 0;
8917 pDst->aRegs[i].au32[3] = 0;
8918 }
8919 }
8920 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
8921 }
8922
8923 /* MXCSR */
8924 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8925 {
8926 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
8927 pDst->MXCSR = pSrc->MXCSR;
8928 else
8929 pDst->MXCSR = 0x1f80;
8930 }
8931
8932 /* XMM registers. */
8933 if (fReqComponents & XSAVE_C_SSE)
8934 {
8935 if (fRstorMask & XSAVE_C_SSE)
8936 {
8937 for (uint32_t i = 0; i < cXmmRegs; i++)
8938 pDst->aXMM[i] = pSrc->aXMM[i];
8939 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8940 * right? */
8941 }
8942 else
8943 {
8944 for (uint32_t i = 0; i < cXmmRegs; i++)
8945 {
8946 pDst->aXMM[i].au64[0] = 0;
8947 pDst->aXMM[i].au64[1] = 0;
8948 }
8949 }
8950 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
8951 }
8952
8953 /* Unmap the x87 state bits (so we don't run out of mappings). */
8954 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
8955 if (rcStrict != VINF_SUCCESS)
8956 return rcStrict;
8957
8958 /*
8959 * Restore AVX state.
8960 */
8961 if (fReqComponents & XSAVE_C_YMM)
8962 {
8963 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8964 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
8965
8966 if (fRstorMask & XSAVE_C_YMM)
8967 {
8968 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8969 PCX86XSAVEYMMHI pCompSrc;
8970 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
8971 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8972 IEM_ACCESS_DATA_R, 0 /* checked above */);
8973 if (rcStrict != VINF_SUCCESS)
8974 return rcStrict;
8975
8976 for (uint32_t i = 0; i < cXmmRegs; i++)
8977 {
8978 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
8979 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
8980 }
8981
8982 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
8983 if (rcStrict != VINF_SUCCESS)
8984 return rcStrict;
8985 }
8986 else
8987 {
8988 for (uint32_t i = 0; i < cXmmRegs; i++)
8989 {
8990 pCompDst->aYmmHi[i].au64[0] = 0;
8991 pCompDst->aYmmHi[i].au64[1] = 0;
8992 }
8993 }
8994 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
8995 }
8996
8997 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8998}
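/*
 * Standard-form XRSTOR rule modelled above, for reference: component i is
 * loaded from memory when both RFBM[i] and XSTATE_BV[i] are set, and put
 * into its initial state when only RFBM[i] is set.  Example: RFBM=0x7 with
 * XSTATE_BV=0x1 loads the x87 state from the image but resets SSE/AVX,
 * i.e. MXCSR=0x1f80 and zeroed XMM / YMM-high registers, as done in the
 * else branches above.
 */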
8999
9000
9001
9002
9003/**
9004 * Implements 'STMXCSR'.
9005 *
9006 * @param iEffSeg The effective segment register for @a GCPtrEff.
9007 * @param GCPtrEff The address of the image.
9008 */
9009IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9010{
9011 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9012
9013 /*
9014 * Raise exceptions.
9015 */
9016 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9017 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9018 {
9019 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9020 {
9021 /*
9022 * Do the job.
9023 */
9024 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9025 if (rcStrict == VINF_SUCCESS)
9026 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9027 return rcStrict;
9028 }
9029 return iemRaiseDeviceNotAvailable(pVCpu);
9030 }
9031 return iemRaiseUndefinedOpcode(pVCpu);
9032}
9033
9034
9035/**
9036 * Implements 'VSTMXCSR'.
9037 *
9038 * @param iEffSeg The effective segment register for @a GCPtrEff.
9039 * @param GCPtrEff The address of the image.
9040 */
9041IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9042{
9043 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9044
9045 /*
9046 * Raise exceptions.
9047 */
9048 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9049 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9050 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
9051 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9052 {
9053 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9054 {
9055 /*
9056 * Do the job.
9057 */
9058 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9059 if (rcStrict == VINF_SUCCESS)
9060 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9061 return rcStrict;
9062 }
9063 return iemRaiseDeviceNotAvailable(pVCpu);
9064 }
9065 return iemRaiseUndefinedOpcode(pVCpu);
9066}
9067
9068
9069/**
9070 * Implements 'LDMXCSR'.
9071 *
9072 * @param iEffSeg The effective segment register for @a GCPtrEff.
9073 * @param GCPtrEff The address of the image.
9074 */
9075IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9076{
9077 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9078
9079 /*
9080 * Raise exceptions.
9081 */
9082 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
9083 * happen after or before \#UD and \#NM? */
9084 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9085 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9086 {
9087 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9088 {
9089 /*
9090 * Do the job.
9091 */
9092 uint32_t fNewMxCsr;
9093 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9094 if (rcStrict == VINF_SUCCESS)
9095 {
9096 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9097 if (!(fNewMxCsr & ~fMxCsrMask))
9098 {
9099 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9100 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9101 }
9102 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9103 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9104 return iemRaiseGeneralProtectionFault0(pVCpu);
9105 }
9106 return rcStrict;
9107 }
9108 return iemRaiseDeviceNotAvailable(pVCpu);
9109 }
9110 return iemRaiseUndefinedOpcode(pVCpu);
9111}
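/*
 * Reference values for the MXCSR check above: the power-on value is 0x1f80
 * (all exceptions masked, round to nearest) and the writable bits are given
 * by MXCSR_MASK - commonly 0xffff on CPUs with DAZ support.  Loading e.g.
 * 0x00010000 sets a reserved bit and therefore takes the #GP(0) path.
 */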
9112
9113
9114/**
9115 * Common routine for fnstenv and fnsave.
9116 *
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9118 * @param enmEffOpSize The effective operand size.
9119 * @param uPtr Where to store the state.
9120 */
9121static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9122{
9123 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9124 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
9125 if (enmEffOpSize == IEMMODE_16BIT)
9126 {
9127 uPtr.pu16[0] = pSrcX87->FCW;
9128 uPtr.pu16[1] = pSrcX87->FSW;
9129 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9130 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9131 {
9132 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9133 * protected mode or long mode and we save it in real mode? And vice
9134 * versa? And with 32-bit operand size? I think CPU is storing the
9135 * effective address ((CS << 4) + IP) in the offset register and not
9136 * doing any address calculations here. */
9137 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9138 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9139 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9140 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9141 }
9142 else
9143 {
9144 uPtr.pu16[3] = pSrcX87->FPUIP;
9145 uPtr.pu16[4] = pSrcX87->CS;
9146 uPtr.pu16[5] = pSrcX87->FPUDP;
9147 uPtr.pu16[6] = pSrcX87->DS;
9148 }
9149 }
9150 else
9151 {
9152 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9153 uPtr.pu16[0*2] = pSrcX87->FCW;
9154 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9155 uPtr.pu16[1*2] = pSrcX87->FSW;
9156 uPtr.pu16[1*2+1] = 0xffff;
9157 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9158 uPtr.pu16[2*2+1] = 0xffff;
9159 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9160 {
9161 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9162 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9163 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9164 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9165 }
9166 else
9167 {
9168 uPtr.pu32[3] = pSrcX87->FPUIP;
9169 uPtr.pu16[4*2] = pSrcX87->CS;
9170 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9171 uPtr.pu32[5] = pSrcX87->FPUDP;
9172 uPtr.pu16[6*2] = pSrcX87->DS;
9173 uPtr.pu16[6*2+1] = 0xffff;
9174 }
9175 }
9176}
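/*
 * Real/V86-mode layout example for the store above (illustration only,
 * assuming FPUIP already holds the linear address (CS << 4) + IP as
 * discussed in the todo above): with CS:IP = 1234h:5678h the linear address
 * is 179b8h, so the 16-bit image gets word 3 (zero-based) = 79b8h and
 * word 4 = 1000h | FOP, i.e. linear bits 16-19 land in the top nibble,
 * matching the shifts used above.
 */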
9177
9178
9179/**
9180 * Common routine for fldenv and frstor.
9181 *
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param enmEffOpSize The effective operand size.
9184 * @param uPtr Where to load the state from.
9185 */
9186static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9187{
9188 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9189 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9190 if (enmEffOpSize == IEMMODE_16BIT)
9191 {
9192 pDstX87->FCW = uPtr.pu16[0];
9193 pDstX87->FSW = uPtr.pu16[1];
9194 pDstX87->FTW = uPtr.pu16[2];
9195 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9196 {
9197 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9198 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9199 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9200 pDstX87->CS = 0;
9201 pDstX87->Rsrvd1= 0;
9202 pDstX87->DS = 0;
9203 pDstX87->Rsrvd2= 0;
9204 }
9205 else
9206 {
9207 pDstX87->FPUIP = uPtr.pu16[3];
9208 pDstX87->CS = uPtr.pu16[4];
9209 pDstX87->Rsrvd1= 0;
9210 pDstX87->FPUDP = uPtr.pu16[5];
9211 pDstX87->DS = uPtr.pu16[6];
9212 pDstX87->Rsrvd2= 0;
9213 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9214 }
9215 }
9216 else
9217 {
9218 pDstX87->FCW = uPtr.pu16[0*2];
9219 pDstX87->FSW = uPtr.pu16[1*2];
9220 pDstX87->FTW = uPtr.pu16[2*2];
9221 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9222 {
9223 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9224 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9225 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9226 pDstX87->CS = 0;
9227 pDstX87->Rsrvd1= 0;
9228 pDstX87->DS = 0;
9229 pDstX87->Rsrvd2= 0;
9230 }
9231 else
9232 {
9233 pDstX87->FPUIP = uPtr.pu32[3];
9234 pDstX87->CS = uPtr.pu16[4*2];
9235 pDstX87->Rsrvd1= 0;
9236 pDstX87->FOP = uPtr.pu16[4*2+1];
9237 pDstX87->FPUDP = uPtr.pu32[5];
9238 pDstX87->DS = uPtr.pu16[6*2];
9239 pDstX87->Rsrvd2= 0;
9240 }
9241 }
9242
9243 /* Make adjustments. */
9244 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9245#ifdef LOG_ENABLED
9246 uint16_t const fOldFsw = pDstX87->FSW;
9247#endif
9248 pDstX87->FCW &= ~X86_FCW_ZERO_MASK;
9249 iemFpuRecalcExceptionStatus(pDstX87);
9250#ifdef LOG_ENABLED
9251 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9252 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9253 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised",
9254 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9255#endif
9256
9257 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9258 * exceptions are pending after loading the saved state? */
9259}
9260
9261
9262/**
9263 * Implements 'FNSTENV'.
9264 *
9265 * @param enmEffOpSize The operand size (only REX.W really matters).
9266 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9267 * @param GCPtrEffDst The address of the image.
9268 */
9269IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9270{
9271 RTPTRUNION uPtr;
9272 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9273 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9274 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9275 if (rcStrict != VINF_SUCCESS)
9276 return rcStrict;
9277
9278 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9279
9280 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9281 if (rcStrict != VINF_SUCCESS)
9282 return rcStrict;
9283
9284 /* Mask all math exceptions. Any possibly pending exceptions will be cleared. */
9285 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9286 pFpuCtx->FCW |= X86_FCW_XCPT_MASK;
9287#ifdef LOG_ENABLED
9288 uint16_t fOldFsw = pFpuCtx->FSW;
9289#endif
9290 iemFpuRecalcExceptionStatus(pFpuCtx);
9291#ifdef LOG_ENABLED
9292 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9293 Log11(("fnstenv: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9294 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9295#endif
9296
9297 iemHlpUsedFpu(pVCpu);
9298
9299 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9300 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9301}
9302
9303
9304/**
9305 * Implements 'FNSAVE'.
9306 *
9307 * @param enmEffOpSize The operand size.
9308 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9309 * @param GCPtrEffDst The address of the image.
9310 */
9311IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9312{
9313 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9314
9315 RTPTRUNION uPtr;
9316 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9317 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9318 if (rcStrict != VINF_SUCCESS)
9319 return rcStrict;
9320
9321 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9322 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9323 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9324 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9325 {
9326 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9327 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9328 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9329 }
9330
9331 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
9332 if (rcStrict != VINF_SUCCESS)
9333 return rcStrict;
9334
9335 /* Rotate the stack to account for changed TOS. */
9336 iemFpuRotateStackSetTop(pFpuCtx, 0);
9337
9338 /*
9339 * Re-initialize the FPU context.
9340 */
9341 pFpuCtx->FCW = 0x37f;
9342 pFpuCtx->FSW = 0;
9343 pFpuCtx->FTW = 0x00; /* 0 - empty */
9344 pFpuCtx->FPUDP = 0;
9345 pFpuCtx->DS = 0;
9346 pFpuCtx->Rsrvd2= 0;
9347 pFpuCtx->FPUIP = 0;
9348 pFpuCtx->CS = 0;
9349 pFpuCtx->Rsrvd1= 0;
9350 pFpuCtx->FOP = 0;
9351
9352 iemHlpUsedFpu(pVCpu);
9353 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9354}
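/*
 * Size note for the FNSTENV/FNSAVE mappings above and the FLDENV/FRSTOR
 * ones below: the environment image is 14 bytes with 16-bit operand size
 * and 28 bytes otherwise, and FNSAVE/FRSTOR append the eight 10-byte ST
 * registers, giving the 94 and 108 byte images (14 + 80 and 28 + 80).
 */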
9355
9356
9357
9358/**
9359 * Implements 'FLDENV'.
9360 *
9361 * @param enmEffOpSize The operand size (only REX.W really matters).
9362 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9363 * @param GCPtrEffSrc The address of the image.
9364 */
9365IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9366{
9367 RTCPTRUNION uPtr;
9368 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9369 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9370 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9371 if (rcStrict != VINF_SUCCESS)
9372 return rcStrict;
9373
9374 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9375
9376 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9377 if (rcStrict != VINF_SUCCESS)
9378 return rcStrict;
9379
9380 iemHlpUsedFpu(pVCpu);
9381 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9382}
9383
9384
9385/**
9386 * Implements 'FRSTOR'.
9387 *
9388 * @param enmEffOpSize The operand size.
9389 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9390 * @param GCPtrEffSrc The address of the image.
9391 */
9392IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9393{
9394 RTCPTRUNION uPtr;
9395 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9396 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9397 if (rcStrict != VINF_SUCCESS)
9398 return rcStrict;
9399
9400 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9401 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9402 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9403 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9404 {
9405 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9406 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9407 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9408 pFpuCtx->aRegs[i].au32[3] = 0;
9409 }
9410
9411 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
9412 if (rcStrict != VINF_SUCCESS)
9413 return rcStrict;
9414
9415 iemHlpUsedFpu(pVCpu);
9416 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9417}
9418
9419
9420/**
9421 * Implements 'FLDCW'.
9422 *
9423 * @param u16Fcw The new FCW.
9424 */
9425IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9426{
9427 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9428
9429 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9430 /** @todo Testcase: Try to see what happens when trying to set undefined bits
9431 * (other than 6 and 7). Currently ignoring them. */
9432 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
9433 * according to FSW. (This is what is currently implemented.) */
9434 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9435 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
9436#ifdef LOG_ENABLED
9437 uint16_t fOldFsw = pFpuCtx->FSW;
9438#endif
9439 iemFpuRecalcExceptionStatus(pFpuCtx);
9440#ifdef LOG_ENABLED
9441 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9442 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9443 fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9444#endif
9445
9446 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9447 iemHlpUsedFpu(pVCpu);
9448 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9449}
9450
9451
9452
9453/**
9454 * Implements the underflow case of fxch.
9455 *
9456 * @param iStReg The other stack register.
9457 */
9458IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
9459{
9460 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9461
9462 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9463 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9464 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9465 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9466
9467 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
9468 * registers are read as QNaN and then exchanged. This could be
9469 * wrong... */
9470 if (pFpuCtx->FCW & X86_FCW_IM)
9471 {
9472 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9473 {
9474 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9475 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9476 else
9477 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9478 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9479 }
9480 else
9481 {
9482 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9483 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9484 }
9485 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9486 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9487 }
9488 else
9489 {
9490 /* raise underflow exception, don't change anything. */
9491 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9492 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9493 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9494 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9495 }
9496
9497 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9498 iemHlpUsedFpu(pVCpu);
9499 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9500}
9501
9502
9503/**
9504 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9505 *
9506 * @param iStReg The other stack register.
9507 * @param pfnAImpl The assembly comparison implementation.
9508 * @param fPop Whether we should pop the stack when done or not.
9509 */
9510IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
9511{
9512 Assert(iStReg < 8);
9513 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9514
9515 /*
9516 * Raise exceptions.
9517 */
9518 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9519 return iemRaiseDeviceNotAvailable(pVCpu);
9520
9521 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9522 uint16_t u16Fsw = pFpuCtx->FSW;
9523 if (u16Fsw & X86_FSW_ES)
9524 return iemRaiseMathFault(pVCpu);
9525
9526 /*
9527 * Check if any of the register accesses causes #SF + #IA.
9528 */
9529 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9530 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9531 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9532 {
9533 uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9534
9535 pFpuCtx->FSW &= ~X86_FSW_C1;
9536 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9537 if ( !(u16Fsw & X86_FSW_IE)
9538 || (pFpuCtx->FCW & X86_FCW_IM) )
9539 {
9540 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9541 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9542 }
9543 }
9544 else if (pFpuCtx->FCW & X86_FCW_IM)
9545 {
9546 /* Masked underflow. */
9547 pFpuCtx->FSW &= ~X86_FSW_C1;
9548 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9549 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9550 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9551 }
9552 else
9553 {
9554 /* Raise underflow - don't touch EFLAGS or TOP. */
9555 pFpuCtx->FSW &= ~X86_FSW_C1;
9556 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9557 Log11(("fcomi/fucomi: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9558 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9559 fPop = false;
9560 }
9561
9562 /*
9563 * Pop if necessary.
9564 */
9565 if (fPop)
9566 {
9567 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9568 iemFpuStackIncTop(pVCpu);
9569 }
9570
9571 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
9572 iemHlpUsedFpu(pVCpu);
9573 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9574}
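/*
 * For reference, the EFLAGS result produced by the comparison helpers used
 * above follows the usual FCOMI/FUCOMI encoding:
 *      ST0 > src   : ZF=0 PF=0 CF=0
 *      ST0 < src   : ZF=0 PF=0 CF=1
 *      ST0 = src   : ZF=1 PF=0 CF=0
 *      unordered   : ZF=1 PF=1 CF=1
 * which is why only ZF, PF and CF are merged into the guest EFLAGS above.
 */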
9575
9576/** @} */
9577