1/* $Id: IEMAll.cpp 100731 2023-07-28 22:22:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 *
91 * The syscall logging level assignments:
92 * - Level 1: DOS and BIOS.
93 * - Level 2: Windows 3.x
94 * - Level 3: Linux.
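 *
 * Note: these groups are enabled through the regular IPRT log group settings,
 * typically via the VBOX_LOG environment variable on debug builds, e.g.
 * something along the lines of VBOX_LOG=+iem.e.l.l3.f for level 1, level 3
 * and flow logging of this group (see the IPRT logging docs for exact syntax).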
95 */
96
97/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
98#ifdef _MSC_VER
99# pragma warning(disable:4505)
100#endif
101
102
103/*********************************************************************************************************************************
104* Header Files *
105*********************************************************************************************************************************/
106#define LOG_GROUP LOG_GROUP_IEM
107#define VMCPU_INCL_CPUM_GST_CTX
108#include <VBox/vmm/iem.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/apic.h>
111#include <VBox/vmm/pdm.h>
112#include <VBox/vmm/pgm.h>
113#include <VBox/vmm/iom.h>
114#include <VBox/vmm/em.h>
115#include <VBox/vmm/hm.h>
116#include <VBox/vmm/nem.h>
117#include <VBox/vmm/gim.h>
118#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
119# include <VBox/vmm/em.h>
120# include <VBox/vmm/hm_svm.h>
121#endif
122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
123# include <VBox/vmm/hmvmxinline.h>
124#endif
125#include <VBox/vmm/tm.h>
126#include <VBox/vmm/dbgf.h>
127#include <VBox/vmm/dbgftrace.h>
128#include "IEMInternal.h"
129#include <VBox/vmm/vmcc.h>
130#include <VBox/log.h>
131#include <VBox/err.h>
132#include <VBox/param.h>
133#include <VBox/dis.h>
134#include <iprt/asm-math.h>
135#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
136# include <iprt/asm-amd64-x86.h>
137#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
138# include <iprt/asm-arm.h>
139#endif
140#include <iprt/assert.h>
141#include <iprt/string.h>
142#include <iprt/x86.h>
143
144#include "IEMInline.h"
145
146
147/*********************************************************************************************************************************
148* Structures and Typedefs *
149*********************************************************************************************************************************/
150/**
151 * CPU exception classes.
152 */
153typedef enum IEMXCPTCLASS
154{
155 IEMXCPTCLASS_BENIGN,
156 IEMXCPTCLASS_CONTRIBUTORY,
157 IEMXCPTCLASS_PAGE_FAULT,
158 IEMXCPTCLASS_DOUBLE_FAULT
159} IEMXCPTCLASS;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165#if defined(IEM_LOG_MEMORY_WRITES)
166/** What IEM just wrote. */
167uint8_t g_abIemWrote[256];
168/** How much IEM just wrote. */
169size_t g_cbIemWrote;
170#endif
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
177 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
178
179
180/**
181 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
182 * path.
183 *
184 * @returns IEM_F_BRK_PENDING_XXX or zero.
185 * @param pVCpu The cross context virtual CPU structure of the
186 * calling thread.
187 *
188 * @note Don't call directly, use iemCalcExecDbgFlags instead.
189 */
190uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
191{
192 uint32_t fExec = 0;
193
194 /*
195 * Process guest breakpoints.
196 */
197#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
198 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
199 { \
200 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
201 { \
202 case X86_DR7_RW_EO: \
203 fExec |= IEM_F_PENDING_BRK_INSTR; \
204 break; \
205 case X86_DR7_RW_WO: \
206 case X86_DR7_RW_RW: \
207 fExec |= IEM_F_PENDING_BRK_DATA; \
208 break; \
209 case X86_DR7_RW_IO: \
210 fExec |= IEM_F_PENDING_BRK_X86_IO; \
211 break; \
212 } \
213 } \
214 } while (0)
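    /* Each PROCESS_ONE_BP expansion below checks one breakpoint's L/G enable
       bits in DR7 and maps its R/W type onto the matching
       IEM_F_PENDING_BRK_XXX flag. */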
215
216 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
217 if (fGstDr7 & X86_DR7_ENABLED_MASK)
218 {
219 PROCESS_ONE_BP(fGstDr7, 0);
220 PROCESS_ONE_BP(fGstDr7, 1);
221 PROCESS_ONE_BP(fGstDr7, 2);
222 PROCESS_ONE_BP(fGstDr7, 3);
223 }
224
225 /*
226 * Process hypervisor breakpoints.
227 */
228 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
229 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
230 {
231 PROCESS_ONE_BP(fHyperDr7, 0);
232 PROCESS_ONE_BP(fHyperDr7, 1);
233 PROCESS_ONE_BP(fHyperDr7, 2);
234 PROCESS_ONE_BP(fHyperDr7, 3);
235 }
236
237 return fExec;
238}
239
240
241/**
242 * Initializes the decoder state.
243 *
244 * iemReInitDecoder is mostly a copy of this function.
245 *
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @param fExecOpts Optional execution flags:
249 * - IEM_F_BYPASS_HANDLERS
250 * - IEM_F_X86_DISREGARD_LOCK
251 */
252DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
253{
254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
264
265 /* Execution state: */
266 uint32_t fExec;
267 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
268
269 /* Decoder state: */
270 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
271 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
272 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
273 {
274 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
276 }
277 else
278 {
279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
281 }
282 pVCpu->iem.s.fPrefixes = 0;
283 pVCpu->iem.s.uRexReg = 0;
284 pVCpu->iem.s.uRexB = 0;
285 pVCpu->iem.s.uRexIndex = 0;
286 pVCpu->iem.s.idxPrefix = 0;
287 pVCpu->iem.s.uVex3rdReg = 0;
288 pVCpu->iem.s.uVexLength = 0;
289 pVCpu->iem.s.fEvexStuff = 0;
290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
291#ifdef IEM_WITH_CODE_TLB
292 pVCpu->iem.s.pbInstrBuf = NULL;
293 pVCpu->iem.s.offInstrNextByte = 0;
294 pVCpu->iem.s.offCurInstrStart = 0;
295# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
296 pVCpu->iem.s.offOpcode = 0;
297# endif
298# ifdef VBOX_STRICT
299 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
300 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
301 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
302 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
303# endif
304#else
305 pVCpu->iem.s.offOpcode = 0;
306 pVCpu->iem.s.cbOpcode = 0;
307#endif
308 pVCpu->iem.s.offModRm = 0;
309 pVCpu->iem.s.cActiveMappings = 0;
310 pVCpu->iem.s.iNextMapping = 0;
311 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
312
313#ifdef DBGFTRACE_ENABLED
314 switch (IEM_GET_CPU_MODE(pVCpu))
315 {
316 case IEMMODE_64BIT:
317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
318 break;
319 case IEMMODE_32BIT:
320 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
321 break;
322 case IEMMODE_16BIT:
323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
324 break;
325 }
326#endif
327}
328
329
330/**
331 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
332 *
333 * This is mostly a copy of iemInitDecoder.
334 *
335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
336 */
337DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
338{
339 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
345 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
346 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
347 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
348
349 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
350 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
351 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
352
353 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
354 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
355 pVCpu->iem.s.enmEffAddrMode = enmMode;
356 if (enmMode != IEMMODE_64BIT)
357 {
358 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
359 pVCpu->iem.s.enmEffOpSize = enmMode;
360 }
361 else
362 {
363 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
364 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
365 }
366 pVCpu->iem.s.fPrefixes = 0;
367 pVCpu->iem.s.uRexReg = 0;
368 pVCpu->iem.s.uRexB = 0;
369 pVCpu->iem.s.uRexIndex = 0;
370 pVCpu->iem.s.idxPrefix = 0;
371 pVCpu->iem.s.uVex3rdReg = 0;
372 pVCpu->iem.s.uVexLength = 0;
373 pVCpu->iem.s.fEvexStuff = 0;
374 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
375#ifdef IEM_WITH_CODE_TLB
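    /* If the previously mapped instruction buffer still covers the new RIP we
       only adjust the offsets into it; otherwise drop the buffer so the next
       opcode fetch goes through the code TLB again. */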
376 if (pVCpu->iem.s.pbInstrBuf)
377 {
378 uint64_t off = (enmMode == IEMMODE_64BIT
379 ? pVCpu->cpum.GstCtx.rip
380 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
381 - pVCpu->iem.s.uInstrBufPc;
382 if (off < pVCpu->iem.s.cbInstrBufTotal)
383 {
384 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
385 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
386 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
387 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
388 else
389 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
390 }
391 else
392 {
393 pVCpu->iem.s.pbInstrBuf = NULL;
394 pVCpu->iem.s.offInstrNextByte = 0;
395 pVCpu->iem.s.offCurInstrStart = 0;
396 pVCpu->iem.s.cbInstrBuf = 0;
397 pVCpu->iem.s.cbInstrBufTotal = 0;
398 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
399 }
400 }
401 else
402 {
403 pVCpu->iem.s.offInstrNextByte = 0;
404 pVCpu->iem.s.offCurInstrStart = 0;
405 pVCpu->iem.s.cbInstrBuf = 0;
406 pVCpu->iem.s.cbInstrBufTotal = 0;
407# ifdef VBOX_STRICT
408 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
409# endif
410 }
411# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
412 pVCpu->iem.s.offOpcode = 0;
413# endif
414#else /* !IEM_WITH_CODE_TLB */
415 pVCpu->iem.s.cbOpcode = 0;
416 pVCpu->iem.s.offOpcode = 0;
417#endif /* !IEM_WITH_CODE_TLB */
418 pVCpu->iem.s.offModRm = 0;
419 Assert(pVCpu->iem.s.cActiveMappings == 0);
420 pVCpu->iem.s.iNextMapping = 0;
421 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
422 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
423
424#ifdef DBGFTRACE_ENABLED
425 switch (enmMode)
426 {
427 case IEMMODE_64BIT:
428 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
429 break;
430 case IEMMODE_32BIT:
431 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
432 break;
433 case IEMMODE_16BIT:
434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
435 break;
436 }
437#endif
438}
439
440
441
442/**
443 * Prefetch opcodes the first time when starting executing.
444 *
445 * @returns Strict VBox status code.
446 * @param pVCpu The cross context virtual CPU structure of the
447 * calling thread.
448 * @param fExecOpts Optional execution flags:
449 * - IEM_F_BYPASS_HANDLERS
450 * - IEM_F_X86_DISREGARD_LOCK
451 */
452static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
453{
454 iemInitDecoder(pVCpu, fExecOpts);
455
456#ifndef IEM_WITH_CODE_TLB
457 /*
458 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
459 *
460 * First translate CS:rIP to a physical address.
461 *
462 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
463 * all relevant bytes from the first page, as it ASSUMES it's only ever
464 * called for dealing with CS.LIM, page crossing and instructions that
465 * are too long.
466 */
467 uint32_t cbToTryRead;
468 RTGCPTR GCPtrPC;
469 if (IEM_IS_64BIT_CODE(pVCpu))
470 {
471 cbToTryRead = GUEST_PAGE_SIZE;
472 GCPtrPC = pVCpu->cpum.GstCtx.rip;
473 if (IEM_IS_CANONICAL(GCPtrPC))
474 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
475 else
476 return iemRaiseGeneralProtectionFault0(pVCpu);
477 }
478 else
479 {
480 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
481 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
482 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
483 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
484 else
485 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
486 if (cbToTryRead) { /* likely */ }
487 else /* overflowed */
488 {
489 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
490 cbToTryRead = UINT32_MAX;
491 }
492 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
493 Assert(GCPtrPC <= UINT32_MAX);
494 }
495
496 PGMPTWALK Walk;
497 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
498 if (RT_SUCCESS(rc))
499 Assert(Walk.fSucceeded); /* probable. */
500 else
501 {
502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
503# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
506# endif
507 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
508 }
509 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
510 else
511 {
512 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
513# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
516# endif
517 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
518 }
519 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
520 else
521 {
522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
523# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
524 if (Walk.fFailed & PGM_WALKFAIL_EPT)
525 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
526# endif
527 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
528 }
529 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
530 /** @todo Check reserved bits and such stuff. PGM is better at doing
531 * that, so do it when implementing the guest virtual address
532 * TLB... */
533
534 /*
535 * Read the bytes at this address.
536 */
537 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
538 if (cbToTryRead > cbLeftOnPage)
539 cbToTryRead = cbLeftOnPage;
540 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
541 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
542
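    /* Unless the caller asked to bypass access handlers, read through PGM so
       MMIO and other specially handled pages are dealt with correctly; the
       bypass path simply reads the raw guest RAM bytes. */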
543 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
547 { /* likely */ }
548 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
549 {
550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
551 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
552 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
553 }
554 else
555 {
556 Log((RT_SUCCESS(rcStrict)
557 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
558 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
560 return rcStrict;
561 }
562 }
563 else
564 {
565 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
566 if (RT_SUCCESS(rc))
567 { /* likely */ }
568 else
569 {
570 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
571 GCPtrPC, GCPhys, rc, cbToTryRead));
572 return rc;
573 }
574 }
575 pVCpu->iem.s.cbOpcode = cbToTryRead;
576#endif /* !IEM_WITH_CODE_TLB */
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Invalidates the IEM TLBs.
583 *
584 * This is called internally as well as by PGM when moving GC mappings.
585 *
586 * @param pVCpu The cross context virtual CPU structure of the calling
587 * thread.
588 */
589VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
590{
591#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
592 Log10(("IEMTlbInvalidateAll\n"));
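    /* Invalidation works by bumping the revision that is embedded in each
       entry's tag; only when the revision counter wraps around do we actually
       have to clear all the tags. */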
593# ifdef IEM_WITH_CODE_TLB
594 pVCpu->iem.s.cbInstrBufTotal = 0;
595 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
596 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
597 { /* very likely */ }
598 else
599 {
600 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
601 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
602 while (i-- > 0)
603 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
604 }
605# endif
606
607# ifdef IEM_WITH_DATA_TLB
608 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
609 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
610 { /* very likely */ }
611 else
612 {
613 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
614 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
615 while (i-- > 0)
616 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
617 }
618# endif
619#else
620 RT_NOREF(pVCpu);
621#endif
622}
623
624
625/**
626 * Invalidates a page in the TLBs.
627 *
628 * @param pVCpu The cross context virtual CPU structure of the calling
629 * thread.
630 * @param GCPtr The address of the page to invalidate
631 * @thread EMT(pVCpu)
632 */
633VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
634{
635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
636 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
637 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
638 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
639 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
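    /* The TLBs are direct mapped, so at most one entry in each can match the
       tag and need zapping. */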
640
641# ifdef IEM_WITH_CODE_TLB
642 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
643 {
644 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
645 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
646 pVCpu->iem.s.cbInstrBufTotal = 0;
647 }
648# endif
649
650# ifdef IEM_WITH_DATA_TLB
651 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
652 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
653# endif
654#else
655 NOREF(pVCpu); NOREF(GCPtr);
656#endif
657}
658
659
660#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
661/**
662 * Invalidates both TLBs in slow fashion following a rollover.
663 *
664 * Worker for IEMTlbInvalidateAllPhysical,
665 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
666 * iemMemMapJmp and others.
667 *
668 * @thread EMT(pVCpu)
669 */
670static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
671{
672 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
673 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
674 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
675
676 unsigned i;
677# ifdef IEM_WITH_CODE_TLB
678 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
679 while (i-- > 0)
680 {
681 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
682 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
683 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
684 }
685# endif
686# ifdef IEM_WITH_DATA_TLB
687 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
688 while (i-- > 0)
689 {
690 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
691 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
692 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
693 }
694# endif
695
696}
697#endif
698
699
700/**
701 * Invalidates the host physical aspects of the IEM TLBs.
702 *
703 * This is called internally as well as by PGM when moving GC mappings.
704 *
705 * @param pVCpu The cross context virtual CPU structure of the calling
706 * thread.
707 * @note Currently not used.
708 */
709VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
710{
711#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
712 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
713 Log10(("IEMTlbInvalidateAllPhysical\n"));
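    /* Each TLB entry caches the physical revision in its IEMTLBE_F_PHYS_REV
       bits, so bumping the revision lazily invalidates the cached physical
       page info; the slow path is only needed when the counter wraps. */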
714
715# ifdef IEM_WITH_CODE_TLB
716 pVCpu->iem.s.cbInstrBufTotal = 0;
717# endif
718 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
719 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
720 {
721 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
722 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
723 }
724 else
725 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
726#else
727 NOREF(pVCpu);
728#endif
729}
730
731
732/**
733 * Invalidates the host physical aspects of the IEM TLBs.
734 *
735 * This is called internally as well as by PGM when moving GC mappings.
736 *
737 * @param pVM The cross context VM structure.
738 * @param idCpuCaller The ID of the calling EMT if available to the caller,
739 * otherwise NIL_VMCPUID.
740 *
741 * @remarks Caller holds the PGM lock.
742 */
743VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
744{
745#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
746 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
747 if (pVCpuCaller)
748 VMCPU_ASSERT_EMT(pVCpuCaller);
749 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
750
751 VMCC_FOR_EACH_VMCPU(pVM)
752 {
753# ifdef IEM_WITH_CODE_TLB
754 if (pVCpuCaller == pVCpu)
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756# endif
757
758 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
759 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
760 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
761 { /* likely */}
762 else if (pVCpuCaller == pVCpu)
763 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
764 else
765 {
766 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
767 continue;
768 }
769 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
770 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
771 }
772 VMCC_FOR_EACH_VMCPU_END(pVM);
773
774#else
775 RT_NOREF(pVM, idCpuCaller);
776#endif
777}
778
779
780/**
781 * Flushes the prefetch buffer, light version.
782 */
783void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
784{
785#ifndef IEM_WITH_CODE_TLB
786 pVCpu->iem.s.cbOpcode = cbInstr;
787#else
788 RT_NOREF(pVCpu, cbInstr);
789#endif
790}
791
792
793/**
794 * Flushes the prefetch buffer, heavy version.
795 */
796void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
797{
798#ifndef IEM_WITH_CODE_TLB
799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
800#elif 1
801 pVCpu->iem.s.pbInstrBuf = NULL;
802 pVCpu->iem.s.cbInstrBufTotal = 0;
803 RT_NOREF(cbInstr);
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810
811#ifdef IEM_WITH_CODE_TLB
812
813/**
814 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
815 * failure and jumps.
816 *
817 * We end up here for a number of reasons:
818 * - pbInstrBuf isn't yet initialized.
819 * - Advancing beyond the buffer boundary (e.g. cross page).
820 * - Advancing beyond the CS segment limit.
821 * - Fetching from non-mappable page (e.g. MMIO).
822 *
823 * @param pVCpu The cross context virtual CPU structure of the
824 * calling thread.
825 * @param pvDst Where to return the bytes.
826 * @param cbDst Number of bytes to read. A value of zero is
827 * allowed for initializing pbInstrBuf (the
828 * recompiler does this). In this case it is best
829 * to set pbInstrBuf to NULL prior to the call.
830 */
831void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
832{
833# ifdef IN_RING3
834 for (;;)
835 {
836 Assert(cbDst <= 8);
837 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
838
839 /*
840 * We might have a partial buffer match, deal with that first to make the
841 * rest simpler. This is the first part of the cross page/buffer case.
842 */
843 if (pVCpu->iem.s.pbInstrBuf != NULL)
844 {
845 if (offBuf < pVCpu->iem.s.cbInstrBuf)
846 {
847 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
848 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
849 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
850
851 cbDst -= cbCopy;
852 pvDst = (uint8_t *)pvDst + cbCopy;
853 offBuf += cbCopy;
854 pVCpu->iem.s.offInstrNextByte += offBuf;
855 }
856 }
857
858 /*
859 * Check segment limit, figuring how much we're allowed to access at this point.
860 *
861 * We will fault immediately if RIP is past the segment limit / in non-canonical
862 * territory. If we do continue, there are one or more bytes to read before we
863 * end up in trouble and we need to do that first before faulting.
864 */
865 RTGCPTR GCPtrFirst;
866 uint32_t cbMaxRead;
867 if (IEM_IS_64BIT_CODE(pVCpu))
868 {
869 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
870 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
871 { /* likely */ }
872 else
873 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
874 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
875 }
876 else
877 {
878 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
879 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
880 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
881 { /* likely */ }
882 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
883 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
884 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
885 if (cbMaxRead != 0)
886 { /* likely */ }
887 else
888 {
889 /* Overflowed because address is 0 and limit is max. */
890 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
891 cbMaxRead = X86_PAGE_SIZE;
892 }
893 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
894 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
895 if (cbMaxRead2 < cbMaxRead)
896 cbMaxRead = cbMaxRead2;
897 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
898 }
899
900 /*
901 * Get the TLB entry for this piece of code.
902 */
903 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
904 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
905 if (pTlbe->uTag == uTag)
906 {
907 /* likely when executing lots of code, otherwise unlikely */
908# ifdef VBOX_WITH_STATISTICS
909 pVCpu->iem.s.CodeTlb.cTlbHits++;
910# endif
911 }
912 else
913 {
914 pVCpu->iem.s.CodeTlb.cTlbMisses++;
915 PGMPTWALK Walk;
916 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
917 if (RT_FAILURE(rc))
918 {
919#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
920 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
921 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
922#endif
923 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
924 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
925 }
926
927 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
928 Assert(Walk.fSucceeded);
929 pTlbe->uTag = uTag;
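            /* The page table permission bits are stored inverted (NO_USER,
               NO_WRITE, etc.) so a zero bit means the access is allowed, and
               the NX bit is shifted down to bit 0 (IEMTLBE_F_PT_NO_EXEC). */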
930 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
931 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
932 pTlbe->GCPhys = Walk.GCPhys;
933 pTlbe->pbMappingR3 = NULL;
934 }
935
936 /*
937 * Check TLB page table level access flags.
938 */
939 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
940 {
941 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
942 {
943 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
944 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
945 }
946 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
947 {
948 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
949 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
950 }
951 }
952
953 /*
954 * Look up the physical page info if necessary.
955 */
956 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
957 { /* not necessary */ }
958 else
959 {
960 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
961 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
962 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
963 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
964 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
965 { /* likely */ }
966 else
967 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
968 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
969 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
970 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
971 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
972 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
973 }
974
975# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
976 /*
977 * Try do a direct read using the pbMappingR3 pointer.
978 */
979 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
980 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
981 {
982 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
983 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
984 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
985 {
986 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
987 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
988 }
989 else
990 {
991 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
992 if (cbInstr + (uint32_t)cbDst <= 15)
993 {
994 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
995 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
996 }
997 else
998 {
999 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1001 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1002 }
1003 }
1004 if (cbDst <= cbMaxRead)
1005 {
1006 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1007 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1008
1009 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1010 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1011 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1012 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1013 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1014 return;
1015 }
1016 pVCpu->iem.s.pbInstrBuf = NULL;
1017
1018 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1019 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1020 }
1021# else
1022# error "refactor as needed"
1023 /*
1024 * There is no special read handling, so we can read a bit more and
1025 * put it in the prefetch buffer.
1026 */
1027 if ( cbDst < cbMaxRead
1028 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1029 {
1030 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1031 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1032 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1033 { /* likely */ }
1034 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1035 {
1036 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1039 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1040 }
1041 else
1042 {
1043 Log((RT_SUCCESS(rcStrict)
1044 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1045 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1046 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1047 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1048 }
1049 }
1050# endif
1051 /*
1052 * Special read handling, so only read exactly what's needed.
1053 * This is a highly unlikely scenario.
1054 */
1055 else
1056 {
1057 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1058
1059 /* Check instruction length. */
1060 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1061 if (RT_LIKELY(cbInstr + cbDst <= 15))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1067 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1068 }
1069
1070 /* Do the reading. */
1071 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1072 if (cbToRead > 0)
1073 {
1074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1075 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1076 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1077 { /* likely */ }
1078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1083 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1091 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1092 }
1093 }
1094
1095 /* Update the state and probably return. */
1096 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1097 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1098 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1099
1100 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1101 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1102 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1103 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1104 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1105 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1106 pVCpu->iem.s.pbInstrBuf = NULL;
1107 if (cbToRead == cbDst)
1108 return;
1109 }
1110
1111 /*
1112 * More to read, loop.
1113 */
1114 cbDst -= cbMaxRead;
1115 pvDst = (uint8_t *)pvDst + cbMaxRead;
1116 }
1117# else /* !IN_RING3 */
1118 RT_NOREF(pvDst, cbDst);
1119 if (pvDst || cbDst)
1120 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1121# endif /* !IN_RING3 */
1122}
1123
1124#else /* !IEM_WITH_CODE_TLB */
1125
1126/**
1127 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1128 * exception if it fails.
1129 *
1130 * @returns Strict VBox status code.
1131 * @param pVCpu The cross context virtual CPU structure of the
1132 * calling thread.
1133 * @param cbMin The minimum number of bytes relative to offOpcode
1134 * that must be read.
1135 */
1136VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1137{
1138 /*
1139 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1140 *
1141 * First translate CS:rIP to a physical address.
1142 */
1143 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1144 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1145 uint8_t const cbLeft = cbOpcode - offOpcode;
1146 Assert(cbLeft < cbMin);
1147 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1148
1149 uint32_t cbToTryRead;
1150 RTGCPTR GCPtrNext;
1151 if (IEM_IS_64BIT_CODE(pVCpu))
1152 {
1153 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1154 if (!IEM_IS_CANONICAL(GCPtrNext))
1155 return iemRaiseGeneralProtectionFault0(pVCpu);
1156 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1157 }
1158 else
1159 {
1160 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1161 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1162 GCPtrNext32 += cbOpcode;
1163 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1164 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1165 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1166 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1167 if (!cbToTryRead) /* overflowed */
1168 {
1169 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1170 cbToTryRead = UINT32_MAX;
1171 /** @todo check out wrapping around the code segment. */
1172 }
1173 if (cbToTryRead < cbMin - cbLeft)
1174 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1175 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1176
1177 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1178 if (cbToTryRead > cbLeftOnPage)
1179 cbToTryRead = cbLeftOnPage;
1180 }
1181
1182 /* Restrict to opcode buffer space.
1183
1184 We're making ASSUMPTIONS here based on work done previously in
1185 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1186 be fetched in case of an instruction crossing two pages. */
1187 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1188 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1189 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1190 { /* likely */ }
1191 else
1192 {
1193 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1194 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1195 return iemRaiseGeneralProtectionFault0(pVCpu);
1196 }
1197
1198 PGMPTWALK Walk;
1199 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1200 if (RT_FAILURE(rc))
1201 {
1202 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1204 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1205 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1206#endif
1207 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1210 {
1211 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1213 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1214 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1215#endif
1216 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1217 }
1218 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1219 {
1220 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1221#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1222 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1223 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1224#endif
1225 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1226 }
1227 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1228 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1229 /** @todo Check reserved bits and such stuff. PGM is better at doing
1230 * that, so do it when implementing the guest virtual address
1231 * TLB... */
1232
1233 /*
1234 * Read the bytes at this address.
1235 *
1236 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1237 * and since PATM should only patch the start of an instruction there
1238 * should be no need to check again here.
1239 */
1240 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1241 {
1242 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1243 cbToTryRead, PGMACCESSORIGIN_IEM);
1244 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1245 { /* likely */ }
1246 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1247 {
1248 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1249 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1250 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1251 }
1252 else
1253 {
1254 Log((RT_SUCCESS(rcStrict)
1255 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1256 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1257 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1258 return rcStrict;
1259 }
1260 }
1261 else
1262 {
1263 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1264 if (RT_SUCCESS(rc))
1265 { /* likely */ }
1266 else
1267 {
1268 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1269 return rc;
1270 }
1271 }
1272 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1273 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1274
1275 return VINF_SUCCESS;
1276}
1277
1278#endif /* !IEM_WITH_CODE_TLB */
1279#ifndef IEM_WITH_SETJMP
1280
1281/**
1282 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1283 *
1284 * @returns Strict VBox status code.
1285 * @param pVCpu The cross context virtual CPU structure of the
1286 * calling thread.
1287 * @param pb Where to return the opcode byte.
1288 */
1289VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1290{
1291 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1292 if (rcStrict == VINF_SUCCESS)
1293 {
1294 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1295 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1296 pVCpu->iem.s.offOpcode = offOpcode + 1;
1297 }
1298 else
1299 *pb = 0;
1300 return rcStrict;
1301}
1302
1303#else /* IEM_WITH_SETJMP */
1304
1305/**
1306 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1307 *
1308 * @returns The opcode byte.
1309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1310 */
1311uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1312{
1313# ifdef IEM_WITH_CODE_TLB
1314 uint8_t u8;
1315 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1316 return u8;
1317# else
1318 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1319 if (rcStrict == VINF_SUCCESS)
1320 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1321 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1322# endif
1323}
1324
1325#endif /* IEM_WITH_SETJMP */
1326
1327#ifndef IEM_WITH_SETJMP
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1334 * @param pu16 Where to return the opcode word.
1335 */
1336VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1337{
1338 uint8_t u8;
1339 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1340 if (rcStrict == VINF_SUCCESS)
1341 *pu16 = (int8_t)u8;
1342 return rcStrict;
1343}
1344
1345
1346/**
1347 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 * @param pu32 Where to return the opcode dword.
1352 */
1353VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1354{
1355 uint8_t u8;
1356 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1357 if (rcStrict == VINF_SUCCESS)
1358 *pu32 = (int8_t)u8;
1359 return rcStrict;
1360}
1361
1362
1363/**
1364 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1368 * @param pu64 Where to return the opcode qword.
1369 */
1370VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1371{
1372 uint8_t u8;
1373 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1374 if (rcStrict == VINF_SUCCESS)
1375 *pu64 = (int8_t)u8;
1376 return rcStrict;
1377}
1378
1379#endif /* !IEM_WITH_SETJMP */
1380
1381
1382#ifndef IEM_WITH_SETJMP
1383
1384/**
1385 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1386 *
1387 * @returns Strict VBox status code.
1388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1389 * @param pu16 Where to return the opcode word.
1390 */
1391VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1392{
1393 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1394 if (rcStrict == VINF_SUCCESS)
1395 {
1396 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1399# else
1400 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1401# endif
1402 pVCpu->iem.s.offOpcode = offOpcode + 2;
1403 }
1404 else
1405 *pu16 = 0;
1406 return rcStrict;
1407}
1408
1409#else /* IEM_WITH_SETJMP */
1410
1411/**
1412 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1413 *
1414 * @returns The opcode word.
1415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1416 */
1417uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1418{
1419# ifdef IEM_WITH_CODE_TLB
1420 uint16_t u16;
1421 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1422 return u16;
1423# else
1424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1425 if (rcStrict == VINF_SUCCESS)
1426 {
1427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1428 pVCpu->iem.s.offOpcode += 2;
1429# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1430 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1431# else
1432 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1433# endif
1434 }
1435 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1436# endif
1437}
1438
1439#endif /* IEM_WITH_SETJMP */
1440
1441#ifndef IEM_WITH_SETJMP
1442
1443/**
1444 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1445 *
1446 * @returns Strict VBox status code.
1447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1448 * @param pu32 Where to return the opcode double word.
1449 */
1450VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1451{
1452 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1453 if (rcStrict == VINF_SUCCESS)
1454 {
1455 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1456 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1457 pVCpu->iem.s.offOpcode = offOpcode + 2;
1458 }
1459 else
1460 *pu32 = 0;
1461 return rcStrict;
1462}
1463
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu64 Where to return the opcode quad word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu64 = 0;
1483 return rcStrict;
1484}
1485
1486#endif /* !IEM_WITH_SETJMP */
1487
1488#ifndef IEM_WITH_SETJMP
1489
1490/**
1491 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1492 *
1493 * @returns Strict VBox status code.
1494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1495 * @param pu32 Where to return the opcode dword.
1496 */
1497VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1498{
1499 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1500 if (rcStrict == VINF_SUCCESS)
1501 {
1502 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1503# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1504 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1505# else
1506 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1507 pVCpu->iem.s.abOpcode[offOpcode + 1],
1508 pVCpu->iem.s.abOpcode[offOpcode + 2],
1509 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1510# endif
1511 pVCpu->iem.s.offOpcode = offOpcode + 4;
1512 }
1513 else
1514 *pu32 = 0;
1515 return rcStrict;
1516}
1517
1518#else /* IEM_WITH_SETJMP */
1519
1520/**
1521 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1522 *
1523 * @returns The opcode dword.
1524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1525 */
1526uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1527{
1528# ifdef IEM_WITH_CODE_TLB
1529 uint32_t u32;
1530 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1531 return u32;
1532# else
1533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1534 if (rcStrict == VINF_SUCCESS)
1535 {
1536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1537 pVCpu->iem.s.offOpcode = offOpcode + 4;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 }
1547 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1548# endif
1549}
1550
1551#endif /* IEM_WITH_SETJMP */
1552
1553#ifndef IEM_WITH_SETJMP
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1557 *
1558 * @returns Strict VBox status code.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param pu64 Where to return the opcode qword.
1561 */
1562VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1563{
1564 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1565 if (rcStrict == VINF_SUCCESS)
1566 {
1567 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1568 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1569 pVCpu->iem.s.abOpcode[offOpcode + 1],
1570 pVCpu->iem.s.abOpcode[offOpcode + 2],
1571 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573 }
1574 else
1575 *pu64 = 0;
1576 return rcStrict;
1577}
1578
1579
1580/**
1581 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1582 *
1583 * @returns Strict VBox status code.
1584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1585 * @param pu64 Where to return the opcode qword.
1586 */
1587VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1588{
1589 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1590 if (rcStrict == VINF_SUCCESS)
1591 {
1592 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1593 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1594 pVCpu->iem.s.abOpcode[offOpcode + 1],
1595 pVCpu->iem.s.abOpcode[offOpcode + 2],
1596 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1597 pVCpu->iem.s.offOpcode = offOpcode + 4;
1598 }
1599 else
1600 *pu64 = 0;
1601 return rcStrict;
1602}
1603
1604#endif /* !IEM_WITH_SETJMP */
1605
1606#ifndef IEM_WITH_SETJMP
1607
1608/**
1609 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1610 *
1611 * @returns Strict VBox status code.
1612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1613 * @param pu64 Where to return the opcode qword.
1614 */
1615VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1616{
1617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1618 if (rcStrict == VINF_SUCCESS)
1619 {
1620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1621# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1622 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1623# else
1624 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1625 pVCpu->iem.s.abOpcode[offOpcode + 1],
1626 pVCpu->iem.s.abOpcode[offOpcode + 2],
1627 pVCpu->iem.s.abOpcode[offOpcode + 3],
1628 pVCpu->iem.s.abOpcode[offOpcode + 4],
1629 pVCpu->iem.s.abOpcode[offOpcode + 5],
1630 pVCpu->iem.s.abOpcode[offOpcode + 6],
1631 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1632# endif
1633 pVCpu->iem.s.offOpcode = offOpcode + 8;
1634 }
1635 else
1636 *pu64 = 0;
1637 return rcStrict;
1638}
1639
1640#else /* IEM_WITH_SETJMP */
1641
1642/**
1643 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1644 *
1645 * @returns The opcode qword.
1646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1647 */
1648uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1649{
1650# ifdef IEM_WITH_CODE_TLB
1651 uint64_t u64;
1652 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1653 return u64;
1654# else
1655 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1656 if (rcStrict == VINF_SUCCESS)
1657 {
1658 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1659 pVCpu->iem.s.offOpcode = offOpcode + 8;
1660# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1661 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1662# else
1663 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1664 pVCpu->iem.s.abOpcode[offOpcode + 1],
1665 pVCpu->iem.s.abOpcode[offOpcode + 2],
1666 pVCpu->iem.s.abOpcode[offOpcode + 3],
1667 pVCpu->iem.s.abOpcode[offOpcode + 4],
1668 pVCpu->iem.s.abOpcode[offOpcode + 5],
1669 pVCpu->iem.s.abOpcode[offOpcode + 6],
1670 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1671# endif
1672 }
1673 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1674# endif
1675}
1676
1677#endif /* IEM_WITH_SETJMP */
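
/*
 * Note on the two build variants of the slow opcode fetchers above: without
 * IEM_WITH_SETJMP failures are reported through the VBOXSTRICTRC return value,
 * whereas with IEM_WITH_SETJMP the fetched value is returned directly and
 * errors bail out via IEM_DO_LONGJMP (or the code TLB fetch path).
 */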
1678
1679
1680
1681/** @name Misc Worker Functions.
1682 * @{
1683 */
1684
1685/**
1686 * Gets the exception class for the specified exception vector.
1687 *
1688 * @returns The class of the specified exception.
1689 * @param uVector The exception vector.
1690 */
1691static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1692{
1693 Assert(uVector <= X86_XCPT_LAST);
1694 switch (uVector)
1695 {
1696 case X86_XCPT_DE:
1697 case X86_XCPT_TS:
1698 case X86_XCPT_NP:
1699 case X86_XCPT_SS:
1700 case X86_XCPT_GP:
1701 case X86_XCPT_SX: /* AMD only */
1702 return IEMXCPTCLASS_CONTRIBUTORY;
1703
1704 case X86_XCPT_PF:
1705 case X86_XCPT_VE: /* Intel only */
1706 return IEMXCPTCLASS_PAGE_FAULT;
1707
1708 case X86_XCPT_DF:
1709 return IEMXCPTCLASS_DOUBLE_FAULT;
1710 }
1711 return IEMXCPTCLASS_BENIGN;
1712}
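
/*
 * Summary of the classification above: #DE, #TS, #NP, #SS, #GP and #SX are
 * contributory, #PF and #VE form the page-fault class, #DF is the double-fault
 * class, and all remaining vectors (e.g. #UD, #NM, #MF) are treated as benign.
 */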
1713
1714
1715/**
1716 * Evaluates how to handle an exception caused during delivery of another event
1717 * (exception / interrupt).
1718 *
1719 * @returns How to handle the recursive exception.
1720 * @param pVCpu The cross context virtual CPU structure of the
1721 * calling thread.
1722 * @param fPrevFlags The flags of the previous event.
1723 * @param uPrevVector The vector of the previous event.
1724 * @param fCurFlags The flags of the current exception.
1725 * @param uCurVector The vector of the current exception.
1726 * @param pfXcptRaiseInfo Where to store additional information about the
1727 * exception condition. Optional.
1728 */
1729VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1730 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1731{
1732 /*
1733 * Only CPU exceptions can be raised while delivering other events; software interrupt
1734 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1735 */
1736 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1737 Assert(pVCpu); RT_NOREF(pVCpu);
1738 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1739
1740 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1741 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1742 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1743 {
1744 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1745 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1746 {
1747 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1748 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1749 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1750 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1751 {
1752 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1753 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1754 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1755 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1756 uCurVector, pVCpu->cpum.GstCtx.cr2));
1757 }
1758 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1759 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1760 {
1761 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1762 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1763 }
1764 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1765 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1766 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1767 {
1768 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1769 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1770 }
1771 }
1772 else
1773 {
1774 if (uPrevVector == X86_XCPT_NMI)
1775 {
1776 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1777 if (uCurVector == X86_XCPT_PF)
1778 {
1779 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1780 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1781 }
1782 }
1783 else if ( uPrevVector == X86_XCPT_AC
1784 && uCurVector == X86_XCPT_AC)
1785 {
1786 enmRaise = IEMXCPTRAISE_CPU_HANG;
1787 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1788 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1789 }
1790 }
1791 }
1792 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1793 {
1794 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1795 if (uCurVector == X86_XCPT_PF)
1796 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1797 }
1798 else
1799 {
1800 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1801 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1802 }
1803
1804 if (pfXcptRaiseInfo)
1805 *pfXcptRaiseInfo = fRaiseInfo;
1806 return enmRaise;
1807}
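
/*
 * Worked example (illustrative only, values assumed): a #GP (contributory)
 * raised while delivering a #NP (contributory) yields IEMXCPTRAISE_DOUBLE_FAULT,
 * whereas a #PF raised while delivering an NMI stays IEMXCPTRAISE_CURRENT_XCPT
 * with the IEMXCPTRAISEINFO_NMI_XCPT | IEMXCPTRAISEINFO_NMI_PF hint set.
 */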
1808
1809
1810/**
1811 * Enters the CPU shutdown state initiated by a triple fault or other
1812 * unrecoverable conditions.
1813 *
1814 * @returns Strict VBox status code.
1815 * @param pVCpu The cross context virtual CPU structure of the
1816 * calling thread.
1817 */
1818static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1819{
1820 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1821 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1822
1823 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1824 {
1825 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1826 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1827 }
1828
1829 RT_NOREF(pVCpu);
1830 return VINF_EM_TRIPLE_FAULT;
1831}
1832
1833
1834/**
1835 * Validates a new SS segment.
1836 *
1837 * @returns VBox strict status code.
1838 * @param pVCpu The cross context virtual CPU structure of the
1839 * calling thread.
1840 * @param NewSS The new SS selector.
1841 * @param uCpl The CPL to load the stack for.
1842 * @param pDesc Where to return the descriptor.
1843 */
1844static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1845{
1846 /* Null selectors are not allowed (we're not called for dispatching
1847 interrupts with SS=0 in long mode). */
1848 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1849 {
1850 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1851 return iemRaiseTaskSwitchFault0(pVCpu);
1852 }
1853
1854 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1855 if ((NewSS & X86_SEL_RPL) != uCpl)
1856 {
1857 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1858 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1859 }
1860
1861 /*
1862 * Read the descriptor.
1863 */
1864 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1865 if (rcStrict != VINF_SUCCESS)
1866 return rcStrict;
1867
1868 /*
1869 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1870 */
1871 if (!pDesc->Legacy.Gen.u1DescType)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1878 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1879 {
1880 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1881 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1882 }
1883 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1886 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1887 }
1888
1889 /* Is it there? */
1890 /** @todo testcase: Is this checked before the canonical / limit check below? */
1891 if (!pDesc->Legacy.Gen.u1Present)
1892 {
1893 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1895 }
1896
1897 return VINF_SUCCESS;
1898}
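
/*
 * Illustrative failure cases (values assumed): a NewSS whose RPL differs from
 * uCpl, or whose descriptor is a system segment, read-only, code, or has
 * DPL != uCpl, fails the checks above with #TS; only a clear present bit
 * yields #NP instead.
 */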
1899
1900/** @} */
1901
1902
1903/** @name Raising Exceptions.
1904 *
1905 * @{
1906 */
1907
1908
1909/**
1910 * Loads the specified stack far pointer from the TSS.
1911 *
1912 * @returns VBox strict status code.
1913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1914 * @param uCpl The CPL to load the stack for.
1915 * @param pSelSS Where to return the new stack segment.
1916 * @param puEsp Where to return the new stack pointer.
1917 */
1918static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1919{
1920 VBOXSTRICTRC rcStrict;
1921 Assert(uCpl < 4);
1922
1923 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1924 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1925 {
1926 /*
1927 * 16-bit TSS (X86TSS16).
1928 */
1929 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1930 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1931 {
1932 uint32_t off = uCpl * 4 + 2;
1933 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1934 {
1935 /** @todo check actual access pattern here. */
1936 uint32_t u32Tmp = 0; /* gcc maybe... */
1937 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1938 if (rcStrict == VINF_SUCCESS)
1939 {
1940 *puEsp = RT_LOWORD(u32Tmp);
1941 *pSelSS = RT_HIWORD(u32Tmp);
1942 return VINF_SUCCESS;
1943 }
1944 }
1945 else
1946 {
1947 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1948 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1949 }
1950 break;
1951 }
1952
1953 /*
1954 * 32-bit TSS (X86TSS32).
1955 */
1956 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1957 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1958 {
1959 uint32_t off = uCpl * 8 + 4;
1960 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1961 {
1962 /** @todo check actual access pattern here. */
1963 uint64_t u64Tmp;
1964 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1965 if (rcStrict == VINF_SUCCESS)
1966 {
1967 *puEsp = u64Tmp & UINT32_MAX;
1968 *pSelSS = (RTSEL)(u64Tmp >> 32);
1969 return VINF_SUCCESS;
1970 }
1971 }
1972 else
1973 {
1974 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1975 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1976 }
1977 break;
1978 }
1979
1980 default:
1981 AssertFailed();
1982 rcStrict = VERR_IEM_IPE_4;
1983 break;
1984 }
1985
1986 *puEsp = 0; /* make gcc happy */
1987 *pSelSS = 0; /* make gcc happy */
1988 return rcStrict;
1989}
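
/*
 * Illustrative offset calculation for the 32-bit TSS case above (uCpl assumed):
 * for uCpl=1 the qword at TR.base + 1*8 + 4 is fetched, i.e. the ESP1/SS1 pair,
 * and the low dword becomes the stack pointer while the high word becomes the
 * stack selector.
 */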
1990
1991
1992/**
1993 * Loads the specified stack pointer from the 64-bit TSS.
1994 *
1995 * @returns VBox strict status code.
1996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1997 * @param uCpl The CPL to load the stack for.
1998 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1999 * @param puRsp Where to return the new stack pointer.
2000 */
2001static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2002{
2003 Assert(uCpl < 4);
2004 Assert(uIst < 8);
2005 *puRsp = 0; /* make gcc happy */
2006
2007 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2008 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2009
2010 uint32_t off;
2011 if (uIst)
2012 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2013 else
2014 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2015 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2016 {
2017 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2018 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2019 }
2020
2021 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2022}
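
/*
 * Illustrative examples for the 64-bit TSS lookup above (inputs assumed):
 * uIst=0 with uCpl=2 reads RSP2 at TR.base + 2*8 + RT_UOFFSETOF(X86TSS64, rsp0),
 * while uIst=3 ignores uCpl and reads IST3 at TR.base + 2*8 + RT_UOFFSETOF(X86TSS64, ist1).
 */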
2023
2024
2025/**
2026 * Adjust the CPU state according to the exception being raised.
2027 *
2028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2029 * @param u8Vector The exception that has been raised.
2030 */
2031DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2032{
2033 switch (u8Vector)
2034 {
2035 case X86_XCPT_DB:
2036 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2037 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2038 break;
2039 /** @todo Read the AMD and Intel exception reference... */
2040 }
2041}
2042
2043
2044/**
2045 * Implements exceptions and interrupts for real mode.
2046 *
2047 * @returns VBox strict status code.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 * @param cbInstr The number of bytes to offset rIP by in the return
2050 * address.
2051 * @param u8Vector The interrupt / exception vector number.
2052 * @param fFlags The flags.
2053 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2054 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2055 */
2056static VBOXSTRICTRC
2057iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2058 uint8_t cbInstr,
2059 uint8_t u8Vector,
2060 uint32_t fFlags,
2061 uint16_t uErr,
2062 uint64_t uCr2) RT_NOEXCEPT
2063{
2064 NOREF(uErr); NOREF(uCr2);
2065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2066
2067 /*
2068 * Read the IDT entry.
2069 */
2070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2071 {
2072 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2074 }
2075 RTFAR16 Idte;
2076 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2077 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2078 {
2079 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2080 return rcStrict;
2081 }
2082
2083 /*
2084 * Push the stack frame.
2085 */
2086 uint16_t *pu16Frame;
2087 uint64_t uNewRsp;
2088 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2089 if (rcStrict != VINF_SUCCESS)
2090 return rcStrict;
2091
2092 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2093#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2094 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2095 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2096 fEfl |= UINT16_C(0xf000);
2097#endif
2098 pu16Frame[2] = (uint16_t)fEfl;
2099 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2100 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2101 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2102 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2103 return rcStrict;
2104
2105 /*
2106 * Load the vector address into cs:ip and make exception specific state
2107 * adjustments.
2108 */
2109 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2110 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2111 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2112 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2113 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2114 pVCpu->cpum.GstCtx.rip = Idte.off;
2115 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2116 IEMMISC_SET_EFL(pVCpu, fEfl);
2117
2118 /** @todo do we actually do this in real mode? */
2119 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2120 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2121
2122 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2123 so it's best to leave them alone in case we're in a weird kind of real mode... */
2124
2125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2126}
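
/*
 * Illustrative real-mode flow (vector assumed): for u8Vector=8 the far pointer
 * is fetched from idtr.pIdt + 4*8 (linear 0x20 with the usual zero IVT base),
 * a 6 byte frame holding IP, CS and FLAGS is pushed, and execution resumes at
 * Idte.sel:Idte.off with IF, TF and AC cleared.
 */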
2127
2128
2129/**
2130 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2131 *
2132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2133 * @param pSReg Pointer to the segment register.
2134 */
2135DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2136{
2137 pSReg->Sel = 0;
2138 pSReg->ValidSel = 0;
2139 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2140 {
2141 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2142 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2143 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2144 }
2145 else
2146 {
2147 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2148 /** @todo check this on AMD-V */
2149 pSReg->u64Base = 0;
2150 pSReg->u32Limit = 0;
2151 }
2152}
2153
2154
2155/**
2156 * Loads a segment selector during a task switch in V8086 mode.
2157 *
2158 * @param pSReg Pointer to the segment register.
2159 * @param uSel The selector value to load.
2160 */
2161DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2162{
2163 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2164 pSReg->Sel = uSel;
2165 pSReg->ValidSel = uSel;
2166 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2167 pSReg->u64Base = uSel << 4;
2168 pSReg->u32Limit = 0xffff;
2169 pSReg->Attr.u = 0xf3;
2170}
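
/*
 * Illustrative example (selector value assumed): loading uSel=0x1234 here gives
 * base 0x12340, limit 0xffff and attributes 0xf3, i.e. a present, DPL 3,
 * accessed read/write data segment as expected for virtual-8086 mode.
 */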
2171
2172
2173/**
2174 * Loads a segment selector during a task switch in protected mode.
2175 *
2176 * In this task switch scenario, we would throw \#TS exceptions rather than
2177 * \#GPs.
2178 *
2179 * @returns VBox strict status code.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param pSReg Pointer to the segment register.
2182 * @param uSel The new selector value.
2183 *
2184 * @remarks This does _not_ handle CS or SS.
2185 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2186 */
2187static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2188{
2189 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2190
2191 /* Null data selector. */
2192 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2193 {
2194 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2197 return VINF_SUCCESS;
2198 }
2199
2200 /* Fetch the descriptor. */
2201 IEMSELDESC Desc;
2202 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2203 if (rcStrict != VINF_SUCCESS)
2204 {
2205 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2206 VBOXSTRICTRC_VAL(rcStrict)));
2207 return rcStrict;
2208 }
2209
2210 /* Must be a data segment or readable code segment. */
2211 if ( !Desc.Legacy.Gen.u1DescType
2212 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2213 {
2214 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2215 Desc.Legacy.Gen.u4Type));
2216 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2217 }
2218
2219 /* Check privileges for data segments and non-conforming code segments. */
2220 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2221 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2222 {
2223 /* The RPL and the new CPL must be less than or equal to the DPL. */
2224 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2225 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2228 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2229 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2230 }
2231 }
2232
2233 /* Is it there? */
2234 if (!Desc.Legacy.Gen.u1Present)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2237 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2238 }
2239
2240 /* The base and limit. */
2241 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2242 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2243
2244 /*
2245 * Ok, everything checked out fine. Now set the accessed bit before
2246 * committing the result into the registers.
2247 */
2248 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2249 {
2250 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2254 }
2255
2256 /* Commit */
2257 pSReg->Sel = uSel;
2258 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2259 pSReg->u32Limit = cbLimit;
2260 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2261 pSReg->ValidSel = uSel;
2262 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2263 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2264 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2265
2266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2268 return VINF_SUCCESS;
2269}
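
/*
 * Illustrative failure case (values assumed): a data selector with RPL=3
 * referencing a DPL=2, non-conforming descriptor fails the privilege check
 * above and raises #TS with the selector (RPL bits masked off) as the error
 * code, rather than the #GP a normal data segment load would produce.
 */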
2270
2271
2272/**
2273 * Performs a task switch.
2274 *
2275 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2276 * caller is responsible for performing the necessary checks (like DPL, TSS
2277 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2278 * reference for JMP, CALL, IRET.
2279 *
2280 * If the task switch is due to a software interrupt or hardware exception,
2281 * the caller is responsible for validating the TSS selector and descriptor. See
2282 * Intel Instruction reference for INT n.
2283 *
2284 * @returns VBox strict status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param enmTaskSwitch The cause of the task switch.
2287 * @param uNextEip The EIP effective after the task switch.
2288 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2289 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2290 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2291 * @param SelTSS The TSS selector of the new task.
2292 * @param pNewDescTSS Pointer to the new TSS descriptor.
2293 */
2294VBOXSTRICTRC
2295iemTaskSwitch(PVMCPUCC pVCpu,
2296 IEMTASKSWITCH enmTaskSwitch,
2297 uint32_t uNextEip,
2298 uint32_t fFlags,
2299 uint16_t uErr,
2300 uint64_t uCr2,
2301 RTSEL SelTSS,
2302 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2303{
2304 Assert(!IEM_IS_REAL_MODE(pVCpu));
2305 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2306 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2307
2308 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2309 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2310 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2311 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2312 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2313
2314 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2315 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2316
2317 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2318 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2319
2320 /* Update CR2 in case it's a page-fault. */
2321 /** @todo This should probably be done much earlier in IEM/PGM. See
2322 * @bugref{5653#c49}. */
2323 if (fFlags & IEM_XCPT_FLAGS_CR2)
2324 pVCpu->cpum.GstCtx.cr2 = uCr2;
2325
2326 /*
2327 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2328 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2329 */
2330 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2331 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2332 if (uNewTSSLimit < uNewTSSLimitMin)
2333 {
2334 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2335 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2336 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2337 }
2338
2339 /*
2340 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2341 * The new TSS must have been read and validated (DPL, limits etc.) before a
2342 * task-switch VM-exit commences.
2343 *
2344 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2345 */
2346 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2347 {
2348 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2349 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2350 }
2351
2352 /*
2353 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2354 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2355 */
2356 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2357 {
2358 uint32_t const uExitInfo1 = SelTSS;
2359 uint32_t uExitInfo2 = uErr;
2360 switch (enmTaskSwitch)
2361 {
2362 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2363 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2364 default: break;
2365 }
2366 if (fFlags & IEM_XCPT_FLAGS_ERR)
2367 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2368 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2369 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2370
2371 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2372 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2373 RT_NOREF2(uExitInfo1, uExitInfo2);
2374 }
2375
2376 /*
2377 * Check the current TSS limit. The last field written to the current TSS during the
2378 * task switch is 2 bytes at offset 0x5C (32-bit) and 2 bytes at offset 0x28 (16-bit).
2379 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2380 *
2381 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2382 * end up with smaller than "legal" TSS limits.
2383 */
2384 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2385 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2386 if (uCurTSSLimit < uCurTSSLimitMin)
2387 {
2388 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2389 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2391 }
2392
2393 /*
2394 * Verify that the new TSS can be accessed and map it. Map only the required contents
2395 * and not the entire TSS.
2396 */
2397 void *pvNewTSS;
2398 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2399 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2400 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2401 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2402 * not perform correct translation if this happens. See Intel spec. 7.2.1
2403 * "Task-State Segment". */
2404 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2405 if (rcStrict != VINF_SUCCESS)
2406 {
2407 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2408 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2409 return rcStrict;
2410 }
2411
2412 /*
2413 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2414 */
2415 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2416 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2417 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2418 {
2419 PX86DESC pDescCurTSS;
2420 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2421 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2422 if (rcStrict != VINF_SUCCESS)
2423 {
2424 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2425 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2426 return rcStrict;
2427 }
2428
2429 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2430 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2431 if (rcStrict != VINF_SUCCESS)
2432 {
2433 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2434 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2435 return rcStrict;
2436 }
2437
2438 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2439 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2442 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2443 fEFlags &= ~X86_EFL_NT;
2444 }
2445 }
2446
2447 /*
2448 * Save the CPU state into the current TSS.
2449 */
2450 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2451 if (GCPtrNewTSS == GCPtrCurTSS)
2452 {
2453 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2454 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2455 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2456 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2457 pVCpu->cpum.GstCtx.ldtr.Sel));
2458 }
2459 if (fIsNewTSS386)
2460 {
2461 /*
2462 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2463 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2464 */
2465 void *pvCurTSS32;
2466 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2467 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2468 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2469 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2470 if (rcStrict != VINF_SUCCESS)
2471 {
2472 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2473 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2474 return rcStrict;
2475 }
2476
2477 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2478 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2479 pCurTSS32->eip = uNextEip;
2480 pCurTSS32->eflags = fEFlags;
2481 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2482 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2483 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2484 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2485 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2486 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2487 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2488 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2489 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2490 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2491 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2492 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2493 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2494 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2495
2496 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2497 if (rcStrict != VINF_SUCCESS)
2498 {
2499 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2500 VBOXSTRICTRC_VAL(rcStrict)));
2501 return rcStrict;
2502 }
2503 }
2504 else
2505 {
2506 /*
2507 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2508 */
2509 void *pvCurTSS16;
2510 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2511 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2512 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2513 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2514 if (rcStrict != VINF_SUCCESS)
2515 {
2516 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2517 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2518 return rcStrict;
2519 }
2520
2521 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2522 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2523 pCurTSS16->ip = uNextEip;
2524 pCurTSS16->flags = (uint16_t)fEFlags;
2525 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2526 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2527 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2528 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2529 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2530 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2531 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2532 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2533 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2534 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2535 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2536 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2537
2538 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2539 if (rcStrict != VINF_SUCCESS)
2540 {
2541 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2542 VBOXSTRICTRC_VAL(rcStrict)));
2543 return rcStrict;
2544 }
2545 }
2546
2547 /*
2548 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2549 */
2550 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2551 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2552 {
2553 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2554 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2555 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2556 }
2557
2558 /*
2559 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2560 * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
2561 */
2562 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2563 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2564 bool fNewDebugTrap;
2565 if (fIsNewTSS386)
2566 {
2567 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2568 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2569 uNewEip = pNewTSS32->eip;
2570 uNewEflags = pNewTSS32->eflags;
2571 uNewEax = pNewTSS32->eax;
2572 uNewEcx = pNewTSS32->ecx;
2573 uNewEdx = pNewTSS32->edx;
2574 uNewEbx = pNewTSS32->ebx;
2575 uNewEsp = pNewTSS32->esp;
2576 uNewEbp = pNewTSS32->ebp;
2577 uNewEsi = pNewTSS32->esi;
2578 uNewEdi = pNewTSS32->edi;
2579 uNewES = pNewTSS32->es;
2580 uNewCS = pNewTSS32->cs;
2581 uNewSS = pNewTSS32->ss;
2582 uNewDS = pNewTSS32->ds;
2583 uNewFS = pNewTSS32->fs;
2584 uNewGS = pNewTSS32->gs;
2585 uNewLdt = pNewTSS32->selLdt;
2586 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2587 }
2588 else
2589 {
2590 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2591 uNewCr3 = 0;
2592 uNewEip = pNewTSS16->ip;
2593 uNewEflags = pNewTSS16->flags;
2594 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2595 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2596 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2597 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2598 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2599 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2600 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2601 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2602 uNewES = pNewTSS16->es;
2603 uNewCS = pNewTSS16->cs;
2604 uNewSS = pNewTSS16->ss;
2605 uNewDS = pNewTSS16->ds;
2606 uNewFS = 0;
2607 uNewGS = 0;
2608 uNewLdt = pNewTSS16->selLdt;
2609 fNewDebugTrap = false;
2610 }
2611
2612 if (GCPtrNewTSS == GCPtrCurTSS)
2613 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2614 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2615
2616 /*
2617 * We're done accessing the new TSS.
2618 */
2619 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2620 if (rcStrict != VINF_SUCCESS)
2621 {
2622 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2623 return rcStrict;
2624 }
2625
2626 /*
2627 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2628 */
2629 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2630 {
2631 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2632 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2636 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* Check that the descriptor indicates the new TSS is available (not busy). */
2641 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2642 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2643 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2644
2645 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2646 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2647 if (rcStrict != VINF_SUCCESS)
2648 {
2649 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2650 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2651 return rcStrict;
2652 }
2653 }
2654
2655 /*
2656 * From this point on, we're technically in the new task. We will defer exceptions
2657 * until the completion of the task switch but before executing any instructions in the new task.
2658 */
2659 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2660 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2661 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2662 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2663 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2664 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2665 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2666
2667 /* Set the busy bit in TR. */
2668 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2669
2670 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2671 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2672 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2673 {
2674 uNewEflags |= X86_EFL_NT;
2675 }
2676
2677 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2678 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2679 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2680
2681 pVCpu->cpum.GstCtx.eip = uNewEip;
2682 pVCpu->cpum.GstCtx.eax = uNewEax;
2683 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2684 pVCpu->cpum.GstCtx.edx = uNewEdx;
2685 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2686 pVCpu->cpum.GstCtx.esp = uNewEsp;
2687 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2688 pVCpu->cpum.GstCtx.esi = uNewEsi;
2689 pVCpu->cpum.GstCtx.edi = uNewEdi;
2690
2691 uNewEflags &= X86_EFL_LIVE_MASK;
2692 uNewEflags |= X86_EFL_RA1_MASK;
2693 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2694
2695 /*
2696 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2697 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
2698 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2699 */
2700 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2701 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2704 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2705
2706 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2707 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2708
2709 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2710 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2711
2712 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2713 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2714
2715 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2716 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2717 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2718
2719 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2720 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2721 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2722 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2723
2724 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2725 {
2726 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2727 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2728 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2729 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2730 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2731 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2732 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2733 }
2734
2735 /*
2736 * Switch CR3 for the new task.
2737 */
2738 if ( fIsNewTSS386
2739 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2740 {
2741 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2742 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2743 AssertRCSuccessReturn(rc, rc);
2744
2745 /* Inform PGM. */
2746 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2747 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2748 AssertRCReturn(rc, rc);
2749 /* ignore informational status codes */
2750
2751 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2752 }
2753
2754 /*
2755 * Switch LDTR for the new task.
2756 */
2757 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2758 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2759 else
2760 {
2761 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2762
2763 IEMSELDESC DescNewLdt;
2764 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2765 if (rcStrict != VINF_SUCCESS)
2766 {
2767 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2768 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2769 return rcStrict;
2770 }
2771 if ( !DescNewLdt.Legacy.Gen.u1Present
2772 || DescNewLdt.Legacy.Gen.u1DescType
2773 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2774 {
2775 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2776 uNewLdt, DescNewLdt.Legacy.u));
2777 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2781 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2782 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2783 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2784 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2786 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2787 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2788 }
2789
2790 IEMSELDESC DescSS;
2791 if (IEM_IS_V86_MODE(pVCpu))
2792 {
2793 IEM_SET_CPL(pVCpu, 3);
2794 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2795 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2796 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2797 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2798 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2799 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2800
2801 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2802 DescSS.Legacy.u = 0;
2803 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2804 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2805 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2806 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2807 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2808 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2809 DescSS.Legacy.Gen.u2Dpl = 3;
2810 }
2811 else
2812 {
2813 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2814
2815 /*
2816 * Load the stack segment for the new task.
2817 */
2818 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2819 {
2820 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2822 }
2823
2824 /* Fetch the descriptor. */
2825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2829 VBOXSTRICTRC_VAL(rcStrict)));
2830 return rcStrict;
2831 }
2832
2833 /* SS must be a data segment and writable. */
2834 if ( !DescSS.Legacy.Gen.u1DescType
2835 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2836 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2837 {
2838 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2839 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2841 }
2842
2843 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2844 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2845 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2846 {
2847 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2848 uNewCpl));
2849 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2850 }
2851
2852 /* Is it there? */
2853 if (!DescSS.Legacy.Gen.u1Present)
2854 {
2855 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2856 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2857 }
2858
2859 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2860 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2861
2862 /* Set the accessed bit before committing the result into SS. */
2863 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2864 {
2865 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2866 if (rcStrict != VINF_SUCCESS)
2867 return rcStrict;
2868 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2869 }
2870
2871 /* Commit SS. */
2872 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2873 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2874 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2875 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2876 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2877 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2878 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2879
2880 /* CPL has changed, update IEM before loading rest of segments. */
2881 IEM_SET_CPL(pVCpu, uNewCpl);
2882
2883 /*
2884 * Load the data segments for the new task.
2885 */
2886 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2887 if (rcStrict != VINF_SUCCESS)
2888 return rcStrict;
2889 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2890 if (rcStrict != VINF_SUCCESS)
2891 return rcStrict;
2892 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2893 if (rcStrict != VINF_SUCCESS)
2894 return rcStrict;
2895 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2896 if (rcStrict != VINF_SUCCESS)
2897 return rcStrict;
2898
2899 /*
2900 * Load the code segment for the new task.
2901 */
2902 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2903 {
2904 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2905 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2906 }
2907
2908 /* Fetch the descriptor. */
2909 IEMSELDESC DescCS;
2910 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2911 if (rcStrict != VINF_SUCCESS)
2912 {
2913 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2914 return rcStrict;
2915 }
2916
2917 /* CS must be a code segment. */
2918 if ( !DescCS.Legacy.Gen.u1DescType
2919 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2920 {
2921 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2922 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2923 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2924 }
2925
2926 /* For conforming CS, DPL must be less than or equal to the RPL. */
2927 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2928 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2929 {
2930 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2931 DescCS.Legacy.Gen.u2Dpl));
2932 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2933 }
2934
2935 /* For non-conforming CS, DPL must match RPL. */
2936 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2937 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2938 {
2939 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2940 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 /* Is it there? */
2945 if (!DescCS.Legacy.Gen.u1Present)
2946 {
2947 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2948 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2949 }
2950
2951 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2952 u64Base = X86DESC_BASE(&DescCS.Legacy);
2953
2954 /* Set the accessed bit before committing the result into CS. */
2955 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2956 {
2957 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2958 if (rcStrict != VINF_SUCCESS)
2959 return rcStrict;
2960 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2961 }
2962
2963 /* Commit CS. */
2964 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2965 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2966 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2967 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2968 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2969 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2971 }
2972
2973 /* Make sure the CPU mode is correct. */
2974 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2975 if (fExecNew != pVCpu->iem.s.fExec)
2976 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2977 pVCpu->iem.s.fExec = fExecNew;
2978
2979 /** @todo Debug trap. */
2980 if (fIsNewTSS386 && fNewDebugTrap)
2981 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2982
2983 /*
2984 * Construct the error code masks based on what caused this task switch.
2985 * See Intel Instruction reference for INT.
2986 */
2987 uint16_t uExt;
2988 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2989 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2990 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2991 uExt = 1;
2992 else
2993 uExt = 0;
2994
2995 /*
2996 * Push any error code on to the new stack.
2997 */
2998 if (fFlags & IEM_XCPT_FLAGS_ERR)
2999 {
3000 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3001 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3002 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3003
3004 /* Check that there is sufficient space on the stack. */
3005 /** @todo Factor out segment limit checking for normal/expand down segments
3006 * into a separate function. */
3007 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3008 {
3009 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3010 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3011 {
3012 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3013 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3014 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3015 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3016 }
3017 }
3018 else
3019 {
3020 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3021 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3022 {
3023 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3024 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3025 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3026 }
3027 }
3028
3029
3030 if (fIsNewTSS386)
3031 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3032 else
3033 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3034 if (rcStrict != VINF_SUCCESS)
3035 {
3036 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3037 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3038 return rcStrict;
3039 }
3040 }
3041
3042 /* Check the new EIP against the new CS limit. */
3043 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3044 {
3045 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3046 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3047 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3048 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3049 }
3050
3051 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3052 pVCpu->cpum.GstCtx.ss.Sel));
3053 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3054}
3055
3056
3057/**
3058 * Implements exceptions and interrupts for protected mode.
3059 *
3060 * @returns VBox strict status code.
3061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3062 * @param cbInstr The number of bytes to offset rIP by in the return
3063 * address.
3064 * @param u8Vector The interrupt / exception vector number.
3065 * @param fFlags The flags.
3066 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3067 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3068 */
3069static VBOXSTRICTRC
3070iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3071 uint8_t cbInstr,
3072 uint8_t u8Vector,
3073 uint32_t fFlags,
3074 uint16_t uErr,
3075 uint64_t uCr2) RT_NOEXCEPT
3076{
3077 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3078
3079 /*
3080 * Read the IDT entry.
3081 */
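 /* Each protected mode IDT entry is 8 bytes, so the IDTR limit must cover
    offset 8*vector+7, i.e. the last byte of the gate descriptor. */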
3082 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3083 {
3084 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3085 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3086 }
3087 X86DESC Idte;
3088 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3089 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3091 {
3092 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3093 return rcStrict;
3094 }
3095 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3096 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3097 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3098 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3099
3100 /*
3101 * Check the descriptor type, DPL and such.
3102 * ASSUMES this is done in the same order as described for call-gate calls.
3103 */
3104 if (Idte.Gate.u1DescType)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 bool fTaskGate = false;
3110 uint8_t f32BitGate = true;
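 /* Note: f32BitGate doubles as a shift count (0 or 1) for scaling the 16-bit
    gate stack frame sizes up to 32-bit ones further down. */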
3111 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3112 switch (Idte.Gate.u4Type)
3113 {
3114 case X86_SEL_TYPE_SYS_UNDEFINED:
3115 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3116 case X86_SEL_TYPE_SYS_LDT:
3117 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3118 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3119 case X86_SEL_TYPE_SYS_UNDEFINED2:
3120 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3121 case X86_SEL_TYPE_SYS_UNDEFINED3:
3122 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3123 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3124 case X86_SEL_TYPE_SYS_UNDEFINED4:
3125 {
3126 /** @todo check what actually happens when the type is wrong...
3127 * esp. call gates. */
3128 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131
3132 case X86_SEL_TYPE_SYS_286_INT_GATE:
3133 f32BitGate = false;
3134 RT_FALL_THRU();
3135 case X86_SEL_TYPE_SYS_386_INT_GATE:
3136 fEflToClear |= X86_EFL_IF;
3137 break;
3138
3139 case X86_SEL_TYPE_SYS_TASK_GATE:
3140 fTaskGate = true;
3141#ifndef IEM_IMPLEMENTS_TASKSWITCH
3142 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3143#endif
3144 break;
3145
3146 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3147 f32BitGate = false;
 RT_FALL_THRU();
3148 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3149 break;
3150
3151 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3152 }
3153
3154 /* Check DPL against CPL if applicable. */
3155 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3156 {
3157 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3158 {
3159 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162 }
3163
3164 /* Is it there? */
3165 if (!Idte.Gate.u1Present)
3166 {
3167 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3168 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3169 }
3170
3171 /* Is it a task-gate? */
3172 if (fTaskGate)
3173 {
3174 /*
3175 * Construct the error code masks based on what caused this task switch.
3176 * See Intel Instruction reference for INT.
3177 */
3178 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3179 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3180 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3181 RTSEL SelTSS = Idte.Gate.u16Sel;
3182
3183 /*
3184 * Fetch the TSS descriptor in the GDT.
3185 */
3186 IEMSELDESC DescTSS;
3187 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3188 if (rcStrict != VINF_SUCCESS)
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3191 VBOXSTRICTRC_VAL(rcStrict)));
3192 return rcStrict;
3193 }
3194
3195 /* The TSS descriptor must be a system segment and be available (not busy). */
3196 if ( DescTSS.Legacy.Gen.u1DescType
3197 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3198 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3199 {
3200 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3201 u8Vector, SelTSS, DescTSS.Legacy.au64));
3202 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3203 }
3204
3205 /* The TSS must be present. */
3206 if (!DescTSS.Legacy.Gen.u1Present)
3207 {
3208 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3209 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3210 }
3211
3212 /* Do the actual task switch. */
3213 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3214 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3215 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3216 }
3217
3218 /* A null CS is bad. */
3219 RTSEL NewCS = Idte.Gate.u16Sel;
3220 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3223 return iemRaiseGeneralProtectionFault0(pVCpu);
3224 }
3225
3226 /* Fetch the descriptor for the new CS. */
3227 IEMSELDESC DescCS;
3228 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3229 if (rcStrict != VINF_SUCCESS)
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3232 return rcStrict;
3233 }
3234
3235 /* Must be a code segment. */
3236 if (!DescCS.Legacy.Gen.u1DescType)
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3242 {
3243 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3244 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3245 }
3246
3247 /* Don't allow lowering the privilege level. */
3248 /** @todo Does the lowering of privileges apply to software interrupts
3249 * only? This has a bearing on the more-privileged or
3250 * same-privilege stack behavior further down. A testcase would
3251 * be nice. */
3252 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3253 {
3254 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3255 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3257 }
3258
3259 /* Make sure the selector is present. */
3260 if (!DescCS.Legacy.Gen.u1Present)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3263 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3264 }
3265
3266#ifdef LOG_ENABLED
3267 /* If this is a software interrupt, try to decode it if logging is enabled and such. */
3268 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3269 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3270 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3271#endif
3272
3273 /* Check the new EIP against the new CS limit. */
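 /* Note: 286 (16-bit) gates only define a 16-bit offset; the high offset word
    is not used for them. */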
3274 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3275 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3276 ? Idte.Gate.u16OffsetLow
3277 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3278 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3279 if (uNewEip > cbLimitCS)
3280 {
3281 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3282 u8Vector, uNewEip, cbLimitCS, NewCS));
3283 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3284 }
3285 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3286
3287 /* Calc the flag image to push. */
3288 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3289 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3290 fEfl &= ~X86_EFL_RF;
3291 else
3292 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3293
3294 /* From V8086 mode only go to CPL 0. */
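 /* A conforming CS keeps the current CPL; a non-conforming CS runs the handler
    at CS.DPL (cf. the long mode code below). */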
3295 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3296 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3297 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3298 {
3299 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3300 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3301 }
3302
3303 /*
3304 * If the privilege level changes, we need to get a new stack from the TSS.
3305 * This in turn means validating the new SS and ESP...
3306 */
3307 if (uNewCpl != IEM_GET_CPL(pVCpu))
3308 {
3309 RTSEL NewSS;
3310 uint32_t uNewEsp;
3311 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3312 if (rcStrict != VINF_SUCCESS)
3313 return rcStrict;
3314
3315 IEMSELDESC DescSS;
3316 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3317 if (rcStrict != VINF_SUCCESS)
3318 return rcStrict;
3319 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3320 if (!DescSS.Legacy.Gen.u1DefBig)
3321 {
3322 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3323 uNewEsp = (uint16_t)uNewEsp;
3324 }
3325
3326 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3327
3328 /* Check that there is sufficient space for the stack frame. */
3329 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
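 /* Frame layout: EIP, CS, EFLAGS, old ESP and old SS (5 words, 10 bytes) plus
    an optional error code; a V8086 interruption additionally saves ES, DS, FS
    and GS (9/10 words). The shift by f32BitGate doubles this for 32-bit gates. */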
3330 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3331 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3332 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3333
3334 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3335 {
3336 if ( uNewEsp - 1 > cbLimitSS
3337 || uNewEsp < cbStackFrame)
3338 {
3339 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3340 u8Vector, NewSS, uNewEsp, cbStackFrame));
3341 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3342 }
3343 }
3344 else
3345 {
3346 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3347 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3348 {
3349 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3350 u8Vector, NewSS, uNewEsp, cbStackFrame));
3351 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3352 }
3353 }
3354
3355 /*
3356 * Start making changes.
3357 */
3358
3359 /* Set the new CPL so that stack accesses use it. */
3360 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3361 IEM_SET_CPL(pVCpu, uNewCpl);
3362
3363 /* Create the stack frame. */
3364 RTPTRUNION uStackFrame;
3365 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3366 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3367 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3368 if (rcStrict != VINF_SUCCESS)
3369 return rcStrict;
3370 void * const pvStackFrame = uStackFrame.pv;
3371 if (f32BitGate)
3372 {
3373 if (fFlags & IEM_XCPT_FLAGS_ERR)
3374 *uStackFrame.pu32++ = uErr;
3375 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3376 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3377 uStackFrame.pu32[2] = fEfl;
3378 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3379 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3380 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3381 if (fEfl & X86_EFL_VM)
3382 {
3383 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3384 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3385 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3386 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3387 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3388 }
3389 }
3390 else
3391 {
3392 if (fFlags & IEM_XCPT_FLAGS_ERR)
3393 *uStackFrame.pu16++ = uErr;
3394 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3395 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3396 uStackFrame.pu16[2] = fEfl;
3397 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3398 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3399 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3400 if (fEfl & X86_EFL_VM)
3401 {
3402 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3403 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3404 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3405 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3406 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3407 }
3408 }
3409 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3410 if (rcStrict != VINF_SUCCESS)
3411 return rcStrict;
3412
3413 /* Mark the selectors 'accessed' (hope this is the correct time). */
3414 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3415 * after pushing the stack frame? (Write protect the gdt + stack to
3416 * find out.) */
3417 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3418 {
3419 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
3422 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3423 }
3424
3425 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3426 {
3427 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3431 }
3432
3433 /*
3434 * Start committing the register changes (joins with the DPL=CPL branch).
3435 */
3436 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3437 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3438 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3439 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3440 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3441 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3442 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3443 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3444 * SP is loaded).
3445 * Need to check the other combinations too:
3446 * - 16-bit TSS, 32-bit handler
3447 * - 32-bit TSS, 16-bit handler */
3448 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3449 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3450 else
3451 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3452
3453 if (fEfl & X86_EFL_VM)
3454 {
3455 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3456 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3457 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3458 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3459 }
3460 }
3461 /*
3462 * Same privilege, no stack change and smaller stack frame.
3463 */
3464 else
3465 {
3466 uint64_t uNewRsp;
3467 RTPTRUNION uStackFrame;
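 /* Only EIP, CS and EFLAGS (3 words) plus an optional error code are pushed
    here; the shift doubles the byte count for 32-bit gates. */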
3468 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3469 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3470 if (rcStrict != VINF_SUCCESS)
3471 return rcStrict;
3472 void * const pvStackFrame = uStackFrame.pv;
3473
3474 if (f32BitGate)
3475 {
3476 if (fFlags & IEM_XCPT_FLAGS_ERR)
3477 *uStackFrame.pu32++ = uErr;
3478 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3479 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3480 uStackFrame.pu32[2] = fEfl;
3481 }
3482 else
3483 {
3484 if (fFlags & IEM_XCPT_FLAGS_ERR)
3485 *uStackFrame.pu16++ = uErr;
3486 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3487 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3488 uStackFrame.pu16[2] = fEfl;
3489 }
3490 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493
3494 /* Mark the CS selector as 'accessed'. */
3495 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3496 {
3497 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3501 }
3502
3503 /*
3504 * Start committing the register changes (joins with the other branch).
3505 */
3506 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3507 }
3508
3509 /* ... register committing continues. */
3510 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3511 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3512 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3513 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3514 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3515 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3516
3517 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3518 fEfl &= ~fEflToClear;
3519 IEMMISC_SET_EFL(pVCpu, fEfl);
3520
3521 if (fFlags & IEM_XCPT_FLAGS_CR2)
3522 pVCpu->cpum.GstCtx.cr2 = uCr2;
3523
3524 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3525 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3526
3527 /* Make sure the execution flags are correct. */
3528 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3529 if (fExecNew != pVCpu->iem.s.fExec)
3530 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3531 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3532 pVCpu->iem.s.fExec = fExecNew;
3533 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3534
3535 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3536}
3537
3538
3539/**
3540 * Implements exceptions and interrupts for long mode.
3541 *
3542 * @returns VBox strict status code.
3543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3544 * @param cbInstr The number of bytes to offset rIP by in the return
3545 * address.
3546 * @param u8Vector The interrupt / exception vector number.
3547 * @param fFlags The flags.
3548 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3549 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3550 */
3551static VBOXSTRICTRC
3552iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3553 uint8_t cbInstr,
3554 uint8_t u8Vector,
3555 uint32_t fFlags,
3556 uint16_t uErr,
3557 uint64_t uCr2) RT_NOEXCEPT
3558{
3559 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3560
3561 /*
3562 * Read the IDT entry.
3563 */
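 /* Long mode IDT entries are 16 bytes each, hence the shift by 4 and the two
    8-byte fetches below. */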
3564 uint16_t offIdt = (uint16_t)u8Vector << 4;
3565 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3566 {
3567 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3568 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3569 }
3570 X86DESC64 Idte;
3571#ifdef _MSC_VER /* Shut up silly compiler warning. */
3572 Idte.au64[0] = 0;
3573 Idte.au64[1] = 0;
3574#endif
3575 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3576 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3577 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3578 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3579 {
3580 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3581 return rcStrict;
3582 }
3583 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3584 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3585 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3586
3587 /*
3588 * Check the descriptor type, DPL and such.
3589 * ASSUMES this is done in the same order as described for call-gate calls.
3590 */
3591 if (Idte.Gate.u1DescType)
3592 {
3593 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3594 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3595 }
3596 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3597 switch (Idte.Gate.u4Type)
3598 {
3599 case AMD64_SEL_TYPE_SYS_INT_GATE:
3600 fEflToClear |= X86_EFL_IF;
3601 break;
3602 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3603 break;
3604
3605 default:
3606 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3607 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3608 }
3609
3610 /* Check DPL against CPL if applicable. */
3611 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3612 {
3613 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 }
3619
3620 /* Is it there? */
3621 if (!Idte.Gate.u1Present)
3622 {
3623 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3624 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3625 }
3626
3627 /* A null CS is bad. */
3628 RTSEL NewCS = Idte.Gate.u16Sel;
3629 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3630 {
3631 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3632 return iemRaiseGeneralProtectionFault0(pVCpu);
3633 }
3634
3635 /* Fetch the descriptor for the new CS. */
3636 IEMSELDESC DescCS;
3637 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3638 if (rcStrict != VINF_SUCCESS)
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3641 return rcStrict;
3642 }
3643
3644 /* Must be a 64-bit code segment. */
3645 if (!DescCS.Long.Gen.u1DescType)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3648 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3649 }
3650 if ( !DescCS.Long.Gen.u1Long
3651 || DescCS.Long.Gen.u1DefBig
3652 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3653 {
3654 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3655 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3656 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3657 }
3658
3659 /* Don't allow lowering the privilege level. For non-conforming CS
3660 selectors, the CS.DPL sets the privilege level the trap/interrupt
3661 handler runs at. For conforming CS selectors, the CPL remains
3662 unchanged, but the CS.DPL must be <= CPL. */
3663 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3664 * when CPU in Ring-0. Result \#GP? */
3665 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3666 {
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3668 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3669 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3670 }
3671
3672
3673 /* Make sure the selector is present. */
3674 if (!DescCS.Legacy.Gen.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3677 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3678 }
3679
3680 /* Check that the new RIP is canonical. */
3681 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3682 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3683 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3684 if (!IEM_IS_CANONICAL(uNewRip))
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3687 return iemRaiseGeneralProtectionFault0(pVCpu);
3688 }
3689
3690 /*
3691 * If the privilege level changes or if the IST isn't zero, we need to get
3692 * a new stack from the TSS.
3693 */
3694 uint64_t uNewRsp;
3695 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3696 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3697 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3698 || Idte.Gate.u3IST != 0)
3699 {
3700 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3701 if (rcStrict != VINF_SUCCESS)
3702 return rcStrict;
3703 }
3704 else
3705 uNewRsp = pVCpu->cpum.GstCtx.rsp;
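 /* In 64-bit mode the CPU aligns the stack pointer down to a 16 byte boundary
    before pushing the interrupt frame. */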
3706 uNewRsp &= ~(uint64_t)0xf;
3707
3708 /*
3709 * Calc the flag image to push.
3710 */
3711 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3712 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3713 fEfl &= ~X86_EFL_RF;
3714 else
3715 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3716
3717 /*
3718 * Start making changes.
3719 */
3720 /* Set the new CPL so that stack accesses use it. */
3721 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3722 IEM_SET_CPL(pVCpu, uNewCpl);
3723/** @todo Setting CPL this early seems wrong as it would affect any errors we
3724 * raise accessing the stack and (?) GDT/LDT... */
3725
3726 /* Create the stack frame. */
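 /* Five qwords: RIP, CS, RFLAGS, old RSP and old SS, plus an optional error code qword. */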
3727 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3728 RTPTRUNION uStackFrame;
3729 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3730 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3731 if (rcStrict != VINF_SUCCESS)
3732 return rcStrict;
3733 void * const pvStackFrame = uStackFrame.pv;
3734
3735 if (fFlags & IEM_XCPT_FLAGS_ERR)
3736 *uStackFrame.pu64++ = uErr;
3737 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3738 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3739 uStackFrame.pu64[2] = fEfl;
3740 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3741 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3742 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3743 if (rcStrict != VINF_SUCCESS)
3744 return rcStrict;
3745
3746 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3747 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3748 * after pushing the stack frame? (Write protect the gdt + stack to
3749 * find out.) */
3750 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3751 {
3752 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3756 }
3757
3758 /*
3759 * Start committing the register changes.
3760 */
3761/** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3762 * hidden registers when interrupting 32-bit or 16-bit code! */
3763 if (uNewCpl != uOldCpl)
3764 {
3765 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3766 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3767 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3768 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3769 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3770 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3771 }
3772 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3773 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3774 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3775 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3776 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3777 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3778 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3779 pVCpu->cpum.GstCtx.rip = uNewRip;
3780
3781 fEfl &= ~fEflToClear;
3782 IEMMISC_SET_EFL(pVCpu, fEfl);
3783
3784 if (fFlags & IEM_XCPT_FLAGS_CR2)
3785 pVCpu->cpum.GstCtx.cr2 = uCr2;
3786
3787 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3788 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3789
3790 iemRecalcExecModeAndCplFlags(pVCpu);
3791
3792 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3793}
3794
3795
3796/**
3797 * Implements exceptions and interrupts.
3798 *
3799 * All exceptions and interrupts go through this function!
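 *
 * A typical caller just supplies the vector, the flags and any error code /
 * CR2 value; e.g. the \#GP(0) helper further down boils down to:
 * @code
 *     iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
 *                       IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
 * @endcode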
3800 *
3801 * @returns VBox strict status code.
3802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3803 * @param cbInstr The number of bytes to offset rIP by in the return
3804 * address.
3805 * @param u8Vector The interrupt / exception vector number.
3806 * @param fFlags The flags.
3807 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3808 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3809 */
3810VBOXSTRICTRC
3811iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3812 uint8_t cbInstr,
3813 uint8_t u8Vector,
3814 uint32_t fFlags,
3815 uint16_t uErr,
3816 uint64_t uCr2) RT_NOEXCEPT
3817{
3818 /*
3819 * Get all the state that we might need here.
3820 */
3821 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3822 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3823
3824#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3825 /*
3826 * Flush prefetch buffer
3827 */
3828 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3829#endif
3830
3831 /*
3832 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3833 */
3834 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3835 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3836 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3837 | IEM_XCPT_FLAGS_BP_INSTR
3838 | IEM_XCPT_FLAGS_ICEBP_INSTR
3839 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3840 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3841 {
3842 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3843 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3844 u8Vector = X86_XCPT_GP;
3845 uErr = 0;
3846 }
3847#ifdef DBGFTRACE_ENABLED
3848 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3849 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3850 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3851#endif
3852
3853 /*
3854 * Evaluate whether NMI blocking should be in effect.
3855 * Normally, NMI blocking is in effect whenever we inject an NMI.
3856 */
3857 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3858 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3859
3860#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3861 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3862 {
3863 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3864 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3865 return rcStrict0;
3866
3867 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3868 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3869 {
3870 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3871 fBlockNmi = false;
3872 }
3873 }
3874#endif
3875
3876#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3877 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3878 {
3879 /*
3880 * If the event is being injected as part of VMRUN, it isn't subject to event
3881 * intercepts in the nested-guest. However, secondary exceptions that occur
3882 * during injection of any event -are- subject to exception intercepts.
3883 *
3884 * See AMD spec. 15.20 "Event Injection".
3885 */
3886 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3887 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3888 else
3889 {
3890 /*
3891 * Check and handle if the event being raised is intercepted.
3892 */
3893 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3894 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3895 return rcStrict0;
3896 }
3897 }
3898#endif
3899
3900 /*
3901 * Set NMI blocking if necessary.
3902 */
3903 if (fBlockNmi)
3904 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3905
3906 /*
3907 * Do recursion accounting.
3908 */
3909 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3910 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3911 if (pVCpu->iem.s.cXcptRecursions == 0)
3912 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3913 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3914 else
3915 {
3916 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3917 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3918 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3919
3920 if (pVCpu->iem.s.cXcptRecursions >= 4)
3921 {
3922#ifdef DEBUG_bird
3923 AssertFailed();
3924#endif
3925 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3926 }
3927
3928 /*
3929 * Evaluate the sequence of recurring events.
3930 */
3931 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3932 NULL /* pXcptRaiseInfo */);
3933 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3934 { /* likely */ }
3935 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3936 {
3937 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3938 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3939 u8Vector = X86_XCPT_DF;
3940 uErr = 0;
3941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3942 /* VMX nested-guest #DF intercept needs to be checked here. */
3943 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3944 {
3945 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3946 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3947 return rcStrict0;
3948 }
3949#endif
3950 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3951 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3952 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3953 }
3954 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3955 {
3956 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3957 return iemInitiateCpuShutdown(pVCpu);
3958 }
3959 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3960 {
3961 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3962 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3963 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3964 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3965 return VERR_EM_GUEST_CPU_HANG;
3966 }
3967 else
3968 {
3969 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3970 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3971 return VERR_IEM_IPE_9;
3972 }
3973
3974 /*
3975 * The 'EXT' bit is set when an exception occurs during delivery of an external
3976 * event (such as an interrupt or an earlier exception)[1]. A privileged software
3977 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3978 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3979 *
3980 * [1] - Intel spec. 6.13 "Error Code"
3981 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3982 * [3] - Intel Instruction reference for INT n.
3983 */
3984 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3985 && (fFlags & IEM_XCPT_FLAGS_ERR)
3986 && u8Vector != X86_XCPT_PF
3987 && u8Vector != X86_XCPT_DF)
3988 {
3989 uErr |= X86_TRAP_ERR_EXTERNAL;
3990 }
3991 }
3992
3993 pVCpu->iem.s.cXcptRecursions++;
3994 pVCpu->iem.s.uCurXcpt = u8Vector;
3995 pVCpu->iem.s.fCurXcpt = fFlags;
3996 pVCpu->iem.s.uCurXcptErr = uErr;
3997 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3998
3999 /*
4000 * Extensive logging.
4001 */
4002#if defined(LOG_ENABLED) && defined(IN_RING3)
4003 if (LogIs3Enabled())
4004 {
4005 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4006 PVM pVM = pVCpu->CTX_SUFF(pVM);
4007 char szRegs[4096];
4008 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4009 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4010 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4011 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4012 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4013 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4014 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4015 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4016 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4017 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4018 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4019 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4020 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4021 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4022 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4023 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4024 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4025 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4026 " efer=%016VR{efer}\n"
4027 " pat=%016VR{pat}\n"
4028 " sf_mask=%016VR{sf_mask}\n"
4029 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4030 " lstar=%016VR{lstar}\n"
4031 " star=%016VR{star} cstar=%016VR{cstar}\n"
4032 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4033 );
4034
4035 char szInstr[256];
4036 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4037 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4038 szInstr, sizeof(szInstr), NULL);
4039 Log3(("%s%s\n", szRegs, szInstr));
4040 }
4041#endif /* LOG_ENABLED */
4042
4043 /*
4044 * Stats.
4045 */
4046 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4047 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4048 else if (u8Vector <= X86_XCPT_LAST)
4049 {
4050 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4051 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4052 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4053 }
4054
4055 /*
4056 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4057 * to ensure that a stale TLB or paging cache entry will only cause one
4058 * spurious #PF.
4059 */
4060 if ( u8Vector == X86_XCPT_PF
4061 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4062 IEMTlbInvalidatePage(pVCpu, uCr2);
4063
4064 /*
4065 * Call the mode specific worker function.
4066 */
4067 VBOXSTRICTRC rcStrict;
4068 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4069 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4070 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4071 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4072 else
4073 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4074
4075 /* Flush the prefetch buffer. */
4076 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4077
4078 /*
4079 * Unwind.
4080 */
4081 pVCpu->iem.s.cXcptRecursions--;
4082 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4083 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4084 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4085 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4086 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4087 return rcStrict;
4088}
4089
4090#ifdef IEM_WITH_SETJMP
4091/**
4092 * See iemRaiseXcptOrInt. Will not return.
4093 */
4094DECL_NO_RETURN(void)
4095iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4096 uint8_t cbInstr,
4097 uint8_t u8Vector,
4098 uint32_t fFlags,
4099 uint16_t uErr,
4100 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4101{
4102 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4103 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4104}
4105#endif
4106
4107
4108/** \#DE - 00. */
4109VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4110{
4111 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4112}
4113
4114
4115/** \#DB - 01.
4116 * @note This automatically clears DR7.GD. */
4117VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4118{
4119 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4120 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4121 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4122}
4123
4124
4125/** \#BR - 05. */
4126VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4127{
4128 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4129}
4130
4131
4132/** \#UD - 06. */
4133VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4134{
4135 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4136}
4137
4138
4139/** \#NM - 07. */
4140VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4141{
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4143}
4144
4145
4146/** \#TS(err) - 0a. */
4147VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4148{
4149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4150}
4151
4152
4153/** \#TS(tr) - 0a. */
4154VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4155{
4156 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 pVCpu->cpum.GstCtx.tr.Sel, 0);
4158}
4159
4160
4161/** \#TS(0) - 0a. */
4162VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 0, 0);
4166}
4167
4168
4169/** \#TS(err) - 0a. */
4170VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & X86_SEL_MASK_OFF_RPL, 0);
4174}
4175
4176
4177/** \#NP(err) - 0b. */
4178VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4181}
4182
4183
4184/** \#NP(sel) - 0b. */
4185VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4186{
4187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4188 uSel & ~X86_SEL_RPL, 0);
4189}
4190
4191
4192/** \#SS(seg) - 0c. */
4193VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4196 uSel & ~X86_SEL_RPL, 0);
4197}
4198
4199
4200/** \#SS(err) - 0c. */
4201VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4204}
4205
4206
4207/** \#GP(n) - 0d. */
4208VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4209{
4210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#GP(0) - 0d. */
4222DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#GP(sel) - 0d. */
4230VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4233 Sel & ~X86_SEL_RPL, 0);
4234}
4235
4236
4237/** \#GP(0) - 0d. */
4238VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4239{
4240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4241}
4242
4243
4244/** \#GP(sel) - 0d. */
4245VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4246{
4247 NOREF(iSegReg); NOREF(fAccess);
4248 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4249 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4250}
4251
4252#ifdef IEM_WITH_SETJMP
4253/** \#GP(sel) - 0d, longjmp. */
4254DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4255{
4256 NOREF(iSegReg); NOREF(fAccess);
4257 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4258 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4259}
4260#endif
4261
4262/** \#GP(sel) - 0d. */
4263VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4264{
4265 NOREF(Sel);
4266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4267}
4268
4269#ifdef IEM_WITH_SETJMP
4270/** \#GP(sel) - 0d, longjmp. */
4271DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4272{
4273 NOREF(Sel);
4274 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4275}
4276#endif
4277
4278
4279/** \#GP(sel) - 0d. */
4280VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4281{
4282 NOREF(iSegReg); NOREF(fAccess);
4283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4284}
4285
4286#ifdef IEM_WITH_SETJMP
4287/** \#GP(sel) - 0d, longjmp. */
4288DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4289{
4290 NOREF(iSegReg); NOREF(fAccess);
4291 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4292}
4293#endif
4294
4295
4296/** \#PF(n) - 0e. */
4297VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4298{
4299 uint16_t uErr;
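 /* Translate the VMM status code and the access type into #PF error code bits:
    P (protection violation vs. not present), RW (write), US (CPL 3) and
    ID (instruction fetch). */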
4300 switch (rc)
4301 {
4302 case VERR_PAGE_NOT_PRESENT:
4303 case VERR_PAGE_TABLE_NOT_PRESENT:
4304 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4305 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4306 uErr = 0;
4307 break;
4308
4309 default:
4310 AssertMsgFailed(("%Rrc\n", rc));
4311 RT_FALL_THRU();
4312 case VERR_ACCESS_DENIED:
4313 uErr = X86_TRAP_PF_P;
4314 break;
4315
4316 /** @todo reserved */
4317 }
4318
4319 if (IEM_GET_CPL(pVCpu) == 3)
4320 uErr |= X86_TRAP_PF_US;
4321
4322 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4323 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4324 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4325 uErr |= X86_TRAP_PF_ID;
4326
4327#if 0 /* This is so much non-sense, really. Why was it done like that? */
4328 /* Note! RW access callers reporting a WRITE protection fault, will clear
4329 the READ flag before calling. So, read-modify-write accesses (RW)
4330 can safely be reported as READ faults. */
4331 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4332 uErr |= X86_TRAP_PF_RW;
4333#else
4334 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4335 {
4336 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4337 /// (regardless of outcome of the comparison in the latter case).
4338 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4339 uErr |= X86_TRAP_PF_RW;
4340 }
4341#endif
4342
4343 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4344 of the memory operand rather than at the start of it. (Not sure what
4345 happens if it crosses a page boundary.) The current heuristic for
4346 this is to report the #PF for the last byte if the access is more than
4347 64 bytes. This is probably not correct, but we can work that out later;
4348 the main objective now is to get FXSAVE to work like it does on real hardware and
4349 make bs3-cpu-basic2 work. */
4350 if (cbAccess <= 64)
4351 { /* likely*/ }
4352 else
4353 GCPtrWhere += cbAccess - 1;
4354
4355 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4356 uErr, GCPtrWhere);
4357}
4358
4359#ifdef IEM_WITH_SETJMP
4360/** \#PF(n) - 0e, longjmp. */
4361DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4362 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4363{
4364 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4365}
4366#endif
4367
4368
4369/** \#MF(0) - 10. */
4370VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4371{
4372 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4373 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4374
4375 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4376 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4377 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4378}
4379
4380
4381/** \#AC(0) - 11. */
4382VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4383{
4384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4385}
4386
4387#ifdef IEM_WITH_SETJMP
4388/** \#AC(0) - 11, longjmp. */
4389DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4390{
4391 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4392}
4393#endif
4394
4395
4396/** \#XF(0)/\#XM(0) - 19. */
4397VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4398{
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4400}
4401
4402
4403/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4404IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4405{
4406 NOREF(cbInstr);
4407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4408}
4409
4410
4411/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4412IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4413{
4414 NOREF(cbInstr);
4415 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4416}
4417
4418
4419/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4420IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4421{
4422 NOREF(cbInstr);
4423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4424}
4425
4426
4427/** @} */
4428
4429/** @name Common opcode decoders.
4430 * @{
4431 */
4432//#include <iprt/mem.h>
4433
4434/**
4435 * Used to add extra details about a stub case.
4436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4437 */
4438void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4439{
4440#if defined(LOG_ENABLED) && defined(IN_RING3)
4441 PVM pVM = pVCpu->CTX_SUFF(pVM);
4442 char szRegs[4096];
4443 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4444 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4445 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4446 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4447 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4448 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4449 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4450 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4451 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4452 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4453 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4454 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4455 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4456 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4457 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4458 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4459 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4460 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4461 " efer=%016VR{efer}\n"
4462 " pat=%016VR{pat}\n"
4463 " sf_mask=%016VR{sf_mask}\n"
4464 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4465 " lstar=%016VR{lstar}\n"
4466 " star=%016VR{star} cstar=%016VR{cstar}\n"
4467 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4468 );
4469
4470 char szInstr[256];
4471 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4472 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4473 szInstr, sizeof(szInstr), NULL);
4474
4475 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4476#else
4477    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4478#endif
4479}
4480
4481/** @} */
4482
4483
4484
4485/** @name Register Access.
4486 * @{
4487 */
4488
4489/**
4490 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4491 *
4492 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4493 * segment limit.
4494 *
4495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4496 * @param cbInstr Instruction size.
4497 * @param offNextInstr The offset of the next instruction.
4498 * @param enmEffOpSize Effective operand size.
4499 */
4500VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4501 IEMMODE enmEffOpSize) RT_NOEXCEPT
4502{
4503 switch (enmEffOpSize)
4504 {
4505 case IEMMODE_16BIT:
4506 {
4507 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4508 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4509 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4510 pVCpu->cpum.GstCtx.rip = uNewIp;
4511 else
4512 return iemRaiseGeneralProtectionFault0(pVCpu);
4513 break;
4514 }
4515
4516 case IEMMODE_32BIT:
4517 {
4518 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4519 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4520
4521 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4522 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4523 pVCpu->cpum.GstCtx.rip = uNewEip;
4524 else
4525 return iemRaiseGeneralProtectionFault0(pVCpu);
4526 break;
4527 }
4528
4529 case IEMMODE_64BIT:
4530 {
4531 Assert(IEM_IS_64BIT_CODE(pVCpu));
4532
4533 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4534 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4535 pVCpu->cpum.GstCtx.rip = uNewRip;
4536 else
4537 return iemRaiseGeneralProtectionFault0(pVCpu);
4538 break;
4539 }
4540
4541 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4542 }
4543
4544#ifndef IEM_WITH_CODE_TLB
4545 /* Flush the prefetch buffer. */
4546 pVCpu->iem.s.cbOpcode = cbInstr;
4547#endif
4548
4549 /*
4550 * Clear RF and finish the instruction (maybe raise #DB).
4551 */
4552 return iemRegFinishClearingRF(pVCpu);
4553}
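
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * how the IEMMODE_16BIT branch above forms the new IP.  The sum is evaluated
 * in a wider type and then truncated to 16 bits, so it wraps within the
 * segment before the limit check is applied.
 */
#if 0 /* example only */
static uint16_t iemExampleCalcRel8Target16(uint16_t uIp, uint8_t cbInstr, int8_t offNextInstr)
{
    /* E.g. uIp=0xfffd, cbInstr=2, offNextInstr=+4 -> 0x0003 (wrapped). */
    return (uint16_t)(uIp + cbInstr + (int16_t)offNextInstr);
}
#endif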
4554
4555
4556/**
4557 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4558 *
4559 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4560 * segment limit.
4561 *
4562 * @returns Strict VBox status code.
4563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4564 * @param cbInstr Instruction size.
4565 * @param offNextInstr The offset of the next instruction.
4566 */
4567VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4568{
4569 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4570
4571 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4572 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4573 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4574 pVCpu->cpum.GstCtx.rip = uNewIp;
4575 else
4576 return iemRaiseGeneralProtectionFault0(pVCpu);
4577
4578#ifndef IEM_WITH_CODE_TLB
4579 /* Flush the prefetch buffer. */
4580 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4581#endif
4582
4583 /*
4584 * Clear RF and finish the instruction (maybe raise #DB).
4585 */
4586 return iemRegFinishClearingRF(pVCpu);
4587}
4588
4589
4590/**
4591 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4592 *
4593 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4594 * segment limit.
4595 *
4596 * @returns Strict VBox status code.
4597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4598 * @param cbInstr Instruction size.
4599 * @param offNextInstr The offset of the next instruction.
4600 * @param enmEffOpSize Effective operand size.
4601 */
4602VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4603 IEMMODE enmEffOpSize) RT_NOEXCEPT
4604{
4605 if (enmEffOpSize == IEMMODE_32BIT)
4606 {
4607 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4608
4609 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4610 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4611 pVCpu->cpum.GstCtx.rip = uNewEip;
4612 else
4613 return iemRaiseGeneralProtectionFault0(pVCpu);
4614 }
4615 else
4616 {
4617 Assert(enmEffOpSize == IEMMODE_64BIT);
4618
4619 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4620 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4621 pVCpu->cpum.GstCtx.rip = uNewRip;
4622 else
4623 return iemRaiseGeneralProtectionFault0(pVCpu);
4624 }
4625
4626#ifndef IEM_WITH_CODE_TLB
4627 /* Flush the prefetch buffer. */
4628 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4629#endif
4630
4631 /*
4632 * Clear RF and finish the instruction (maybe raise #DB).
4633 */
4634 return iemRegFinishClearingRF(pVCpu);
4635}
4636
4637
4638/**
4639 * Performs a near jump to the specified address.
4640 *
4641 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param uNewIp The new IP value.
4645 */
4646VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4647{
4648 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4649 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4650 pVCpu->cpum.GstCtx.rip = uNewIp;
4651 else
4652 return iemRaiseGeneralProtectionFault0(pVCpu);
4653 /** @todo Test 16-bit jump in 64-bit mode. */
4654
4655#ifndef IEM_WITH_CODE_TLB
4656 /* Flush the prefetch buffer. */
4657 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4658#endif
4659
4660 /*
4661 * Clear RF and finish the instruction (maybe raise #DB).
4662 */
4663 return iemRegFinishClearingRF(pVCpu);
4664}
4665
4666
4667/**
4668 * Performs a near jump to the specified address.
4669 *
4670 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4671 *
4672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4673 * @param uNewEip The new EIP value.
4674 */
4675VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4676{
4677 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4678 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4679
4680 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4681 pVCpu->cpum.GstCtx.rip = uNewEip;
4682 else
4683 return iemRaiseGeneralProtectionFault0(pVCpu);
4684
4685#ifndef IEM_WITH_CODE_TLB
4686 /* Flush the prefetch buffer. */
4687 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4688#endif
4689
4690 /*
4691 * Clear RF and finish the instruction (maybe raise #DB).
4692 */
4693 return iemRegFinishClearingRF(pVCpu);
4694}
4695
4696
4697/**
4698 * Performs a near jump to the specified address.
4699 *
4700 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4701 * segment limit.
4702 *
4703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4704 * @param uNewRip The new RIP value.
4705 */
4706VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4707{
4708 Assert(IEM_IS_64BIT_CODE(pVCpu));
4709
4710 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4711 pVCpu->cpum.GstCtx.rip = uNewRip;
4712 else
4713 return iemRaiseGeneralProtectionFault0(pVCpu);
4714
4715#ifndef IEM_WITH_CODE_TLB
4716 /* Flush the prefetch buffer. */
4717 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4718#endif
4719
4720 /*
4721 * Clear RF and finish the instruction (maybe raise #DB).
4722 */
4723 return iemRegFinishClearingRF(pVCpu);
4724}
4725
4726/** @} */
4727
4728
4729/** @name FPU access and helpers.
4730 *
4731 * @{
4732 */
4733
4734/**
4735 * Updates the x87.DS and FPUDP registers.
4736 *
4737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4738 * @param pFpuCtx The FPU context.
4739 * @param iEffSeg The effective segment register.
4740 * @param GCPtrEff The effective address relative to @a iEffSeg.
4741 */
4742DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4743{
4744 RTSEL sel;
4745 switch (iEffSeg)
4746 {
4747 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4748 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4749 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4750 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4751 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4752 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4753 default:
4754 AssertMsgFailed(("%d\n", iEffSeg));
4755 sel = pVCpu->cpum.GstCtx.ds.Sel;
4756 }
4757    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4758 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4759 {
4760 pFpuCtx->DS = 0;
4761 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4762 }
4763 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4764 {
4765 pFpuCtx->DS = sel;
4766 pFpuCtx->FPUDP = GCPtrEff;
4767 }
4768 else
4769 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4770}
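
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * how the real/V86-mode branch above forms FPUDP from the 16-bit selector and
 * the effective offset while leaving the DS field zero.
 */
#if 0 /* example only */
static uint32_t iemExampleRealModeFpuDp(uint16_t uSel, uint32_t offEff)
{
    /* E.g. uSel=0x1234, offEff=0x10 -> 0x12350. */
    return offEff + ((uint32_t)uSel << 4);
}
#endif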
4771
4772
4773/**
4774 * Rotates the stack registers in the push direction.
4775 *
4776 * @param pFpuCtx The FPU context.
4777 * @remarks This is a complete waste of time, but fxsave stores the registers in
4778 * stack order.
4779 */
4780DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4781{
4782 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4783 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4784 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4785 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4786 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4787 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4788 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4790 pFpuCtx->aRegs[0].r80 = r80Tmp;
4791}
4792
4793
4794/**
4795 * Rotates the stack registers in the pop direction.
4796 *
4797 * @param pFpuCtx The FPU context.
4798 * @remarks This is a complete waste of time, but fxsave stores the registers in
4799 * stack order.
4800 */
4801DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4802{
4803 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4804 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4805 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4806 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4807 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4808 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4809 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4810 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4811 pFpuCtx->aRegs[7].r80 = r80Tmp;
4812}
4813
4814
4815/**
4816 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4817 * exception prevents it.
4818 *
4819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4820 * @param pResult The FPU operation result to push.
4821 * @param pFpuCtx The FPU context.
4822 */
4823static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4824{
4825 /* Update FSW and bail if there are pending exceptions afterwards. */
4826 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4827 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4828 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4829 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4830 {
4831        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4832 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4834 pFpuCtx->FSW = fFsw;
4835 return;
4836 }
4837
4838 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4839 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4840 {
4841 /* All is fine, push the actual value. */
4842 pFpuCtx->FTW |= RT_BIT(iNewTop);
4843 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4844 }
4845 else if (pFpuCtx->FCW & X86_FCW_IM)
4846 {
4847 /* Masked stack overflow, push QNaN. */
4848 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4849 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4850 }
4851 else
4852 {
4853 /* Raise stack overflow, don't push anything. */
4854 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4855 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4856 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4858 return;
4859 }
4860
4861 fFsw &= ~X86_FSW_TOP_MASK;
4862 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4863 pFpuCtx->FSW = fFsw;
4864
4865 iemFpuRotateStackPush(pFpuCtx);
4866 RT_NOREF(pVCpu);
4867}
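
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * the push path above computes the new TOP as (TOP + 7) & 7, i.e. TOP - 1
 * modulo 8 without a signed subtraction.
 */
#if 0 /* example only */
static uint16_t iemExampleFpuTopAfterPush(uint16_t iTop)
{
    /* iTop=0 -> 7, iTop=3 -> 2, etc. */
    return (iTop + 7) & 7;
}
#endif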
4868
4869
4870/**
4871 * Stores a result in a FPU register and updates the FSW and FTW.
4872 *
4873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4874 * @param pFpuCtx The FPU context.
4875 * @param pResult The result to store.
4876 * @param iStReg Which FPU register to store it in.
4877 */
4878static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4879{
4880 Assert(iStReg < 8);
4881 uint16_t fNewFsw = pFpuCtx->FSW;
4882 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4883 fNewFsw &= ~X86_FSW_C_MASK;
4884 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4885 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4886 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4888 pFpuCtx->FSW = fNewFsw;
4889 pFpuCtx->FTW |= RT_BIT(iReg);
4890 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4891 RT_NOREF(pVCpu);
4892}
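
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * the ST(i) -> physical register mapping used for the FTW tag bit above, i.e.
 * the logical stack index added to TOP modulo 8.
 */
#if 0 /* example only */
static uint8_t iemExampleStRegToPhysReg(uint8_t iTop, uint8_t iStReg)
{
    /* E.g. TOP=6, ST(3) -> physical register 1. */
    return (uint8_t)((iTop + iStReg) & 7);
}
#endif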
4893
4894
4895/**
4896 * Only updates the FPU status word (FSW) with the result of the current
4897 * instruction.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param pFpuCtx The FPU context.
4901 * @param u16FSW The FSW output of the current instruction.
4902 */
4903static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4904{
4905 uint16_t fNewFsw = pFpuCtx->FSW;
4906 fNewFsw &= ~X86_FSW_C_MASK;
4907 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4908 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4909 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4910 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4911 pFpuCtx->FSW = fNewFsw;
4912 RT_NOREF(pVCpu);
4913}
4914
4915
4916/**
4917 * Pops one item off the FPU stack if no pending exception prevents it.
4918 *
4919 * @param pFpuCtx The FPU context.
4920 */
4921static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4922{
4923 /* Check pending exceptions. */
4924 uint16_t uFSW = pFpuCtx->FSW;
4925 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4926 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4927 return;
4928
4929 /* TOP--. */
4930 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4931 uFSW &= ~X86_FSW_TOP_MASK;
4932 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4933 pFpuCtx->FSW = uFSW;
4934
4935 /* Mark the previous ST0 as empty. */
4936 iOldTop >>= X86_FSW_TOP_SHIFT;
4937 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4938
4939 /* Rotate the registers. */
4940 iemFpuRotateStackPop(pFpuCtx);
4941}
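
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * the TOP update above adds 9 inside the 3-bit TOP field and masks, which is
 * equivalent to incrementing TOP modulo 8 (the extra 8 falls outside the
 * field).  Uses the X86_FSW_* constants already available in this file.
 */
#if 0 /* example only */
static uint16_t iemExampleFswTopAfterPop(uint16_t fFsw)
{
    uint16_t const iOldTop = fFsw & X86_FSW_TOP_MASK;
    fFsw &= ~X86_FSW_TOP_MASK;
    fFsw |= (iOldTop + (UINT16_C(1) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    return fFsw;
}
#endif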
4942
4943
4944/**
4945 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The FPU operation result to push.
4949 * @param uFpuOpcode The FPU opcode value.
4950 */
4951void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4955 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4956}
4957
4958
4959/**
4960 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4961 * and sets FPUDP and FPUDS.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to push.
4965 * @param iEffSeg The effective segment register.
4966 * @param GCPtrEff The effective address relative to @a iEffSeg.
4967 * @param uFpuOpcode The FPU opcode value.
4968 */
4969void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4970 uint16_t uFpuOpcode) RT_NOEXCEPT
4971{
4972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4973 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4974 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4975 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4976}
4977
4978
4979/**
4980 * Replace ST0 with the first value and push the second onto the FPU stack,
4981 * unless a pending exception prevents it.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param pResult The FPU operation result to store and push.
4985 * @param uFpuOpcode The FPU opcode value.
4986 */
4987void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4988{
4989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4991
4992 /* Update FSW and bail if there are pending exceptions afterwards. */
4993 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4994 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4995 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4996 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4997 {
4998 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4999 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5001 pFpuCtx->FSW = fFsw;
5002 return;
5003 }
5004
5005 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5006 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5007 {
5008 /* All is fine, push the actual value. */
5009 pFpuCtx->FTW |= RT_BIT(iNewTop);
5010 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5011 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5012 }
5013 else if (pFpuCtx->FCW & X86_FCW_IM)
5014 {
5015 /* Masked stack overflow, push QNaN. */
5016 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5019 }
5020 else
5021 {
5022 /* Raise stack overflow, don't push anything. */
5023 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5024 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5025 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5027 return;
5028 }
5029
5030 fFsw &= ~X86_FSW_TOP_MASK;
5031 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5032 pFpuCtx->FSW = fFsw;
5033
5034 iemFpuRotateStackPush(pFpuCtx);
5035}
5036
5037
5038/**
5039 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5040 * FOP.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The result to store.
5044 * @param iStReg Which FPU register to store it in.
5045 * @param uFpuOpcode The FPU opcode value.
5046 */
5047void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5051 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5052}
5053
5054
5055/**
5056 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5057 * FOP, and then pops the stack.
5058 *
5059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5060 * @param pResult The result to store.
5061 * @param iStReg Which FPU register to store it in.
5062 * @param uFpuOpcode The FPU opcode value.
5063 */
5064void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5065{
5066 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5067 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5068 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5069 iemFpuMaybePopOne(pFpuCtx);
5070}
5071
5072
5073/**
5074 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5075 * FPUDP, and FPUDS.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param pResult The result to store.
5079 * @param iStReg Which FPU register to store it in.
5080 * @param iEffSeg The effective memory operand selector register.
5081 * @param GCPtrEff The effective memory operand offset.
5082 * @param uFpuOpcode The FPU opcode value.
5083 */
5084void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5085 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091}
5092
5093
5094/**
5095 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5096 * FPUDP, and FPUDS, and then pops the stack.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param pResult The result to store.
5100 * @param iStReg Which FPU register to store it in.
5101 * @param iEffSeg The effective memory operand selector register.
5102 * @param GCPtrEff The effective memory operand offset.
5103 * @param uFpuOpcode The FPU opcode value.
5104 */
5105void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5106 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5110 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5111 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5112 iemFpuMaybePopOne(pFpuCtx);
5113}
5114
5115
5116/**
5117 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5118 *
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126}
5127
5128
5129/**
5130 * Updates the FSW, FOP, FPUIP, and FPUCS.
5131 *
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param u16FSW The FSW from the current instruction.
5134 * @param uFpuOpcode The FPU opcode value.
5135 */
5136void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5137{
5138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5139 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5140 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5141}
5142
5143
5144/**
5145 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param u16FSW The FSW from the current instruction.
5149 * @param uFpuOpcode The FPU opcode value.
5150 */
5151void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5152{
5153 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5156 iemFpuMaybePopOne(pFpuCtx);
5157}
5158
5159
5160/**
5161 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param u16FSW The FSW from the current instruction.
5165 * @param iEffSeg The effective memory operand selector register.
5166 * @param GCPtrEff The effective memory operand offset.
5167 * @param uFpuOpcode The FPU opcode value.
5168 */
5169void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5170{
5171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5172 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5173 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5174 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5175}
5176
5177
5178/**
5179 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5180 *
5181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5182 * @param u16FSW The FSW from the current instruction.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190 iemFpuMaybePopOne(pFpuCtx);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195/**
5196 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5197 *
5198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5199 * @param u16FSW The FSW from the current instruction.
5200 * @param iEffSeg The effective memory operand selector register.
5201 * @param GCPtrEff The effective memory operand offset.
5202 * @param uFpuOpcode The FPU opcode value.
5203 */
5204void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5205 uint16_t uFpuOpcode) RT_NOEXCEPT
5206{
5207 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5208 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5209 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5210 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5211 iemFpuMaybePopOne(pFpuCtx);
5212}
5213
5214
5215/**
5216 * Worker routine for raising an FPU stack underflow exception.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 * @param pFpuCtx The FPU context.
5220 * @param iStReg The stack register being accessed.
5221 */
5222static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5223{
5224 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5225 if (pFpuCtx->FCW & X86_FCW_IM)
5226 {
5227 /* Masked underflow. */
5228 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5229 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5231 if (iStReg != UINT8_MAX)
5232 {
5233 pFpuCtx->FTW |= RT_BIT(iReg);
5234 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5235 }
5236 }
5237 else
5238 {
5239 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5240 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5241 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5242 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5243 }
5244 RT_NOREF(pVCpu);
5245}
5246
5247
5248/**
5249 * Raises a FPU stack underflow exception.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param iStReg The destination register that should be loaded
5253 * with QNaN if \#IS is not masked. Specify
5254 * UINT8_MAX if none (like for fcom).
5255 * @param uFpuOpcode The FPU opcode value.
5256 */
5257void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5258{
5259 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5260 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5261 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5262}
5263
5264
5265void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5266{
5267 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5268 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5269 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5270 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5271}
5272
5273
5274void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5278 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5284 uint16_t uFpuOpcode) RT_NOEXCEPT
5285{
5286 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5287 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5288 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5289 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5290 iemFpuMaybePopOne(pFpuCtx);
5291}
5292
5293
5294void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5295{
5296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5297 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5298 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5299 iemFpuMaybePopOne(pFpuCtx);
5300 iemFpuMaybePopOne(pFpuCtx);
5301}
5302
5303
5304void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5305{
5306 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5307 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5308
5309 if (pFpuCtx->FCW & X86_FCW_IM)
5310 {
5311        /* Masked underflow - Push QNaN. */
5312 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5313 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5315 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5316 pFpuCtx->FTW |= RT_BIT(iNewTop);
5317 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5318 iemFpuRotateStackPush(pFpuCtx);
5319 }
5320 else
5321 {
5322 /* Exception pending - don't change TOP or the register stack. */
5323 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5324 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5325 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5327 }
5328}
5329
5330
5331void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5332{
5333 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335
5336 if (pFpuCtx->FCW & X86_FCW_IM)
5337 {
5338        /* Masked underflow - Push QNaN. */
5339 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5340 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5341 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5342 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5343 pFpuCtx->FTW |= RT_BIT(iNewTop);
5344 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5345 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5346 iemFpuRotateStackPush(pFpuCtx);
5347 }
5348 else
5349 {
5350 /* Exception pending - don't change TOP or the register stack. */
5351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5352 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5353 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5354 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5355 }
5356}
5357
5358
5359/**
5360 * Worker routine for raising an FPU stack overflow exception on a push.
5361 *
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param pFpuCtx The FPU context.
5364 */
5365static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5366{
5367 if (pFpuCtx->FCW & X86_FCW_IM)
5368 {
5369 /* Masked overflow. */
5370 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5371 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5372 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5373 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5374 pFpuCtx->FTW |= RT_BIT(iNewTop);
5375 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5376 iemFpuRotateStackPush(pFpuCtx);
5377 }
5378 else
5379 {
5380 /* Exception pending - don't change TOP or the register stack. */
5381 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5382 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5383 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5385 }
5386 RT_NOREF(pVCpu);
5387}
5388
5389
5390/**
5391 * Raises a FPU stack overflow exception on a push.
5392 *
5393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5394 * @param uFpuOpcode The FPU opcode value.
5395 */
5396void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5397{
5398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5399 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5400 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5401}
5402
5403
5404/**
5405 * Raises a FPU stack overflow exception on a push with a memory operand.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param iEffSeg The effective memory operand selector register.
5409 * @param GCPtrEff The effective memory operand offset.
5410 * @param uFpuOpcode The FPU opcode value.
5411 */
5412void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5413{
5414 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5415 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5416 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5417 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5418}
5419
5420/** @} */
5421
5422
5423/** @name SSE+AVX SIMD access and helpers.
5424 *
5425 * @{
5426 */
5427/**
5428 * Stores a result in a SIMD XMM register, updates the MXCSR.
5429 *
5430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5431 * @param pResult The result to store.
5432 * @param iXmmReg Which SIMD XMM register to store the result in.
5433 */
5434void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5438
5439 /* The result is only updated if there is no unmasked exception pending. */
5440 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5441 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5442 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5443}
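
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * the "no unmasked exception pending" test above.  The MXCSR mask bits sit a
 * fixed distance above the corresponding flag bits, so shifting the mask field
 * down lines it up with the flags.  Uses the X86_MXCSR_* constants already
 * available in this file.
 */
#if 0 /* example only */
static bool iemExampleMxcsrHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fFlags = fMxcsr & X86_MXCSR_XCPT_FLAGS;
    uint32_t const fMasks = (fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT;
    return (fFlags & ~fMasks) != 0;
}
#endif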
5444
5445
5446/**
5447 * Updates the MXCSR.
5448 *
5449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5450 * @param fMxcsr The MXCSR output of the instruction (only the exception flags are merged in).
5451 */
5452void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5456}
5457/** @} */
5458
5459
5460/** @name Memory access.
5461 *
5462 * @{
5463 */
5464
5465
5466/**
5467 * Updates the IEMCPU::cbWritten counter if applicable.
5468 *
5469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5470 * @param fAccess The access being accounted for.
5471 * @param cbMem The access size.
5472 */
5473DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5474{
5475 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5476 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5477 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5478}
5479
5480
5481/**
5482 * Applies the segment limit, base and attributes.
5483 *
5484 * This may raise a \#GP or \#SS.
5485 *
5486 * @returns VBox strict status code.
5487 *
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param fAccess The kind of access which is being performed.
5490 * @param iSegReg The index of the segment register to apply.
5491 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5492 * TSS, ++).
5493 * @param cbMem The access size.
5494 * @param pGCPtrMem Pointer to the guest memory address to apply
5495 * segmentation to. Input and output parameter.
5496 */
5497VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5498{
5499 if (iSegReg == UINT8_MAX)
5500 return VINF_SUCCESS;
5501
5502 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5503 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5504 switch (IEM_GET_CPU_MODE(pVCpu))
5505 {
5506 case IEMMODE_16BIT:
5507 case IEMMODE_32BIT:
5508 {
5509 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5510 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5511
5512 if ( pSel->Attr.n.u1Present
5513 && !pSel->Attr.n.u1Unusable)
5514 {
5515 Assert(pSel->Attr.n.u1DescType);
5516 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5517 {
5518 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5519 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5520 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5521
5522 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5523 {
5524 /** @todo CPL check. */
5525 }
5526
5527 /*
5528 * There are two kinds of data selectors, normal and expand down.
5529 */
5530 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5531 {
5532 if ( GCPtrFirst32 > pSel->u32Limit
5533 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5534 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5535 }
5536 else
5537 {
5538 /*
5539 * The upper boundary is defined by the B bit, not the G bit!
5540 */
5541 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5542 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5543 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5544 }
5545 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5546 }
5547 else
5548 {
5549 /*
5550 * Code selectors can usually be used to read through; writing is
5551 * only permitted in real and V8086 mode.
5552 */
5553 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5554 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5555 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5556 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5557 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5558
5559 if ( GCPtrFirst32 > pSel->u32Limit
5560 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5561 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5562
5563 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5564 {
5565 /** @todo CPL check. */
5566 }
5567
5568 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5569 }
5570 }
5571 else
5572 return iemRaiseGeneralProtectionFault0(pVCpu);
5573 return VINF_SUCCESS;
5574 }
5575
5576 case IEMMODE_64BIT:
5577 {
5578 RTGCPTR GCPtrMem = *pGCPtrMem;
5579 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5580 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5581
5582 Assert(cbMem >= 1);
5583 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5584 return VINF_SUCCESS;
5585 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5586 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5587 return iemRaiseGeneralProtectionFault0(pVCpu);
5588 }
5589
5590 default:
5591 AssertFailedReturn(VERR_IEM_IPE_7);
5592 }
5593}
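
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * the expand-down data segment check above.  Valid offsets lie strictly above
 * the limit and at or below the upper bound selected by the B/D bit.
 */
#if 0 /* example only */
static bool iemExampleExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst >= uLimit + UINT32_C(1) && offLast <= uUpper;
}
#endif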
5594
5595
5596/**
5597 * Translates a virtual address to a physical address and checks if we
5598 * can access the page as specified.
5599 *
5600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5601 * @param GCPtrMem The virtual address.
5602 * @param cbAccess The access size, for raising \#PF correctly for
5603 * FXSAVE and such.
5604 * @param fAccess The intended access.
5605 * @param pGCPhysMem Where to return the physical address.
5606 */
5607VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5608 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5609{
5610 /** @todo Need a different PGM interface here. We're currently using
5611 * generic / REM interfaces. This won't cut it for R0. */
5612 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5613 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5614 * here. */
5615 PGMPTWALK Walk;
5616 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5617 if (RT_FAILURE(rc))
5618 {
5619 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5620 /** @todo Check unassigned memory in unpaged mode. */
5621 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5623 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5624 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5625#endif
5626 *pGCPhysMem = NIL_RTGCPHYS;
5627 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5628 }
5629
5630 /* If the page is writable and does not have the no-exec bit set, all
5631 access is allowed. Otherwise we'll have to check more carefully... */
5632 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5633 {
5634 /* Write to read only memory? */
5635 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5636 && !(Walk.fEffective & X86_PTE_RW)
5637 && ( ( IEM_GET_CPL(pVCpu) == 3
5638 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5639 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5640 {
5641 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5642 *pGCPhysMem = NIL_RTGCPHYS;
5643#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5644 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5645 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5646#endif
5647 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5648 }
5649
5650 /* Kernel memory accessed by userland? */
5651 if ( !(Walk.fEffective & X86_PTE_US)
5652 && IEM_GET_CPL(pVCpu) == 3
5653 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5654 {
5655 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5656 *pGCPhysMem = NIL_RTGCPHYS;
5657#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5658 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5659 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5660#endif
5661 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5662 }
5663
5664 /* Executing non-executable memory? */
5665 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5666 && (Walk.fEffective & X86_PTE_PAE_NX)
5667 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5668 {
5669 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5670 *pGCPhysMem = NIL_RTGCPHYS;
5671#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5674#endif
5675 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5676 VERR_ACCESS_DENIED);
5677 }
5678 }
5679
5680 /*
5681 * Set the dirty / access flags.
5682     * ASSUMES this is set when the address is translated rather than on commit...
5683 */
5684 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5685 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5686 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5687 {
5688 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5689 AssertRC(rc2);
5690 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5691 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5692 }
5693
5694 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5695 *pGCPhysMem = GCPhys;
5696 return VINF_SUCCESS;
5697}
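
/*
 * Illustrative sketch (hypothetical helper, kept out of the build with #if 0):
 * the write-protection test above for a write to a non-writable page.  Ring-3
 * accesses fault unless flagged as system accesses; supervisor accesses fault
 * only when CR0.WP is set.
 */
#if 0 /* example only */
static bool iemExampleWriteToReadOnlyFaults(uint8_t uCpl, bool fSysAccess, bool fCr0Wp)
{
    return (uCpl == 3 && !fSysAccess) || fCr0Wp;
}
#endif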
5698
5699
5700/**
5701 * Looks up a memory mapping entry.
5702 *
5703 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5705 * @param pvMem The memory address.
5706 * @param fAccess The access flags to match (what + type).
5707 */
5708DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5709{
5710 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5711 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5712 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5713 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5714 return 0;
5715 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5716 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5717 return 1;
5718 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5719 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5720 return 2;
5721 return VERR_NOT_FOUND;
5722}
5723
5724
5725/**
5726 * Finds a free memmap entry when using iNextMapping doesn't work.
5727 *
5728 * @returns Memory mapping index, 1024 on failure.
5729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5730 */
5731static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5732{
5733 /*
5734 * The easy case.
5735 */
5736 if (pVCpu->iem.s.cActiveMappings == 0)
5737 {
5738 pVCpu->iem.s.iNextMapping = 1;
5739 return 0;
5740 }
5741
5742 /* There should be enough mappings for all instructions. */
5743 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5744
5745 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5746 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5747 return i;
5748
5749 AssertFailedReturn(1024);
5750}
5751
5752
5753/**
5754 * Commits a bounce buffer that needs writing back and unmaps it.
5755 *
5756 * @returns Strict VBox status code.
5757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5758 * @param iMemMap The index of the buffer to commit.
5759 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5760 * Always false in ring-3, obviously.
5761 */
5762static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5763{
5764 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5765 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5766#ifdef IN_RING3
5767 Assert(!fPostponeFail);
5768 RT_NOREF_PV(fPostponeFail);
5769#endif
5770
5771 /*
5772 * Do the writing.
5773 */
5774 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5775 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5776 {
5777 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5778 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5779 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5780 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5781 {
5782 /*
5783 * Carefully and efficiently dealing with access handler return
5784         * codes makes this a little bloated.
5785 */
5786 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5788 pbBuf,
5789 cbFirst,
5790 PGMACCESSORIGIN_IEM);
5791 if (rcStrict == VINF_SUCCESS)
5792 {
5793 if (cbSecond)
5794 {
5795 rcStrict = PGMPhysWrite(pVM,
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5797 pbBuf + cbFirst,
5798 cbSecond,
5799 PGMACCESSORIGIN_IEM);
5800 if (rcStrict == VINF_SUCCESS)
5801 { /* nothing */ }
5802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5803 {
5804 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5807 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5808 }
5809#ifndef IN_RING3
5810 else if (fPostponeFail)
5811 {
5812 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5815 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5816 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5817 return iemSetPassUpStatus(pVCpu, rcStrict);
5818 }
5819#endif
5820 else
5821 {
5822 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5825 return rcStrict;
5826 }
5827 }
5828 }
5829 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5830 {
5831 if (!cbSecond)
5832 {
5833 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5835 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5836 }
5837 else
5838 {
5839 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5841 pbBuf + cbFirst,
5842 cbSecond,
5843 PGMACCESSORIGIN_IEM);
5844 if (rcStrict2 == VINF_SUCCESS)
5845 {
5846 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5850 }
5851 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5852 {
5853 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5856 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5857 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5858 }
5859#ifndef IN_RING3
5860 else if (fPostponeFail)
5861 {
5862 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5866 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5867 return iemSetPassUpStatus(pVCpu, rcStrict);
5868 }
5869#endif
5870 else
5871 {
5872 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5875 return rcStrict2;
5876 }
5877 }
5878 }
5879#ifndef IN_RING3
5880 else if (fPostponeFail)
5881 {
5882 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5885 if (!cbSecond)
5886 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5887 else
5888 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5889 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5890 return iemSetPassUpStatus(pVCpu, rcStrict);
5891 }
5892#endif
5893 else
5894 {
5895 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5898 return rcStrict;
5899 }
5900 }
5901 else
5902 {
5903 /*
5904 * No access handlers, much simpler.
5905 */
5906 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5907 if (RT_SUCCESS(rc))
5908 {
5909 if (cbSecond)
5910 {
5911 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5912 if (RT_SUCCESS(rc))
5913 { /* likely */ }
5914 else
5915 {
5916 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5917 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5919 return rc;
5920 }
5921 }
5922 }
5923 else
5924 {
5925 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5928 return rc;
5929 }
5930 }
5931 }
5932
5933#if defined(IEM_LOG_MEMORY_WRITES)
5934 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5935 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5936 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5937 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5938 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5939 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5940
5941 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5942 g_cbIemWrote = cbWrote;
5943 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5944#endif
5945
5946 /*
5947 * Free the mapping entry.
5948 */
5949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5950 Assert(pVCpu->iem.s.cActiveMappings != 0);
5951 pVCpu->iem.s.cActiveMappings--;
5952 return VINF_SUCCESS;
5953}
5954
5955
5956/**
5957 * iemMemMap worker that deals with a request crossing pages.
5958 */
5959static VBOXSTRICTRC
5960iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5961{
5962 Assert(cbMem <= GUEST_PAGE_SIZE);
5963
5964 /*
5965 * Do the address translations.
5966 */
5967 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5968 RTGCPHYS GCPhysFirst;
5969 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5970 if (rcStrict != VINF_SUCCESS)
5971 return rcStrict;
5972 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5973
5974 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5975 RTGCPHYS GCPhysSecond;
5976 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5977 cbSecondPage, fAccess, &GCPhysSecond);
5978 if (rcStrict != VINF_SUCCESS)
5979 return rcStrict;
5980 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5981 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5982
5983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5984
5985 /*
5986 * Read in the current memory content if it's a read, execute or partial
5987 * write access.
5988 */
5989 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5990
5991 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5992 {
5993 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5994 {
5995 /*
5996 * Must carefully deal with access handler status codes here,
5997 * which makes the code a bit bloated.
5998 */
5999 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6000 if (rcStrict == VINF_SUCCESS)
6001 {
6002 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6003 if (rcStrict == VINF_SUCCESS)
6004 { /*likely */ }
6005 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6006 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6007 else
6008 {
6009 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6010 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6011 return rcStrict;
6012 }
6013 }
6014 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6015 {
6016 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6017 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6018 {
6019 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6020 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6021 }
6022 else
6023 {
6024 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6025 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6026 return rcStrict2;
6027 }
6028 }
6029 else
6030 {
6031 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6032 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6033 return rcStrict;
6034 }
6035 }
6036 else
6037 {
6038 /*
6039 * No informational status codes here, much more straightforward.
6040 */
6041 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6042 if (RT_SUCCESS(rc))
6043 {
6044 Assert(rc == VINF_SUCCESS);
6045 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6046 if (RT_SUCCESS(rc))
6047 Assert(rc == VINF_SUCCESS);
6048 else
6049 {
6050 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6051 return rc;
6052 }
6053 }
6054 else
6055 {
6056 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6057 return rc;
6058 }
6059 }
6060 }
6061#ifdef VBOX_STRICT
6062 else
6063 memset(pbBuf, 0xcc, cbMem);
6064 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6065 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6066#endif
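 /* The 64 byte alignment asserted below presumably matches the x86 cache line size, keeping each bounce buffer cache line aligned. */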
6067 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6068
6069 /*
6070 * Commit the bounce buffer entry.
6071 */
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6074 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6075 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6076 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6077 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6078 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6079 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6080 pVCpu->iem.s.cActiveMappings++;
6081
6082 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6083 *ppvMem = pbBuf;
6084 return VINF_SUCCESS;
6085}
6086
6087
6088/**
6089 * iemMemMap worker that deals with iemMemPageMap failures.
6090 */
6091static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6092 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6093{
6094 /*
6095 * Filter out conditions we can handle and the ones which shouldn't happen.
6096 */
6097 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6098 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6099 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6100 {
6101 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6102 return rcMap;
6103 }
6104 pVCpu->iem.s.cPotentialExits++;
6105
6106 /*
6107 * Read in the current memory content if it's a read, execute or partial
6108 * write access.
6109 */
6110 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6111 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6112 {
6113 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6114 memset(pbBuf, 0xff, cbMem);
6115 else
6116 {
6117 int rc;
6118 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6119 {
6120 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6121 if (rcStrict == VINF_SUCCESS)
6122 { /* nothing */ }
6123 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6124 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6125 else
6126 {
6127 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6128 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6129 return rcStrict;
6130 }
6131 }
6132 else
6133 {
6134 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6135 if (RT_SUCCESS(rc))
6136 { /* likely */ }
6137 else
6138 {
6139 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6140 GCPhysFirst, rc));
6141 return rc;
6142 }
6143 }
6144 }
6145 }
6146#ifdef VBOX_STRICT
6147 else
6148 memset(pbBuf, 0xcc, cbMem);
6149#endif
6150#ifdef VBOX_STRICT
6151 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6152 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6153#endif
6154
6155 /*
6156 * Commit the bounce buffer entry.
6157 */
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6163 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6164 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6165 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6166 pVCpu->iem.s.cActiveMappings++;
6167
6168 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6169 *ppvMem = pbBuf;
6170 return VINF_SUCCESS;
6171}
6172
6173
6174
6175/**
6176 * Maps the specified guest memory for the given kind of access.
6177 *
6178 * This may be using bounce buffering of the memory if it's crossing a page
6179 * boundary or if there is an access handler installed for any of it. Because
6180 * of lock prefix guarantees, we're in for some extra clutter when this
6181 * happens.
6182 *
6183 * This may raise a \#GP, \#SS, \#PF or \#AC.
6184 *
6185 * @returns VBox strict status code.
6186 *
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param ppvMem Where to return the pointer to the mapped memory.
6189 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6190 * 8, 12, 16, 32 or 512. When used by string operations
6191 * it can be up to a page.
6192 * @param iSegReg The index of the segment register to use for this
6193 * access. The base and limits are checked. Use UINT8_MAX
6194 * to indicate that no segmentation is required (for IDT,
6195 * GDT and LDT accesses).
6196 * @param GCPtrMem The address of the guest memory.
6197 * @param fAccess How the memory is being accessed. The
6198 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6199 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6200 * when raising exceptions.
6201 * @param uAlignCtl Alignment control:
6202 * - Bits 15:0 is the alignment mask.
6203 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6204 * IEM_MEMMAP_F_ALIGN_SSE, and
6205 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6206 * Pass zero to skip alignment.
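 * For instance, a 16 byte aligned SSE access typically passes
 * 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE here, as the
 * aligned SSE fetch helpers further down do.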
6207 */
6208VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6209 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6210{
6211 /*
6212 * Check the input and figure out which mapping entry to use.
6213 */
6214 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6215 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6216 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6217 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6218 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6219
6220 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6221 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6222 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6223 {
6224 iMemMap = iemMemMapFindFree(pVCpu);
6225 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6226 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6227 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6228 pVCpu->iem.s.aMemMappings[2].fAccess),
6229 VERR_IEM_IPE_9);
6230 }
6231
6232 /*
6233 * Map the memory, checking that we can actually access it. If something
6234 * slightly complicated happens, fall back on bounce buffering.
6235 */
6236 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6237 if (rcStrict == VINF_SUCCESS)
6238 { /* likely */ }
6239 else
6240 return rcStrict;
6241
6242 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6243 { /* likely */ }
6244 else
6245 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6246
6247 /*
6248 * Alignment check.
6249 */
6250 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6251 { /* likelyish */ }
6252 else
6253 {
6254 /* Misaligned access. */
6255 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6256 {
6257 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6258 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6259 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6260 {
6261 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6262
6263 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6264 return iemRaiseAlignmentCheckException(pVCpu);
6265 }
6266 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6267 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6268 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6269 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6270 * that's what FXSAVE does on a 10980xe. */
6271 && iemMemAreAlignmentChecksEnabled(pVCpu))
6272 return iemRaiseAlignmentCheckException(pVCpu);
6273 else
6274 return iemRaiseGeneralProtectionFault0(pVCpu);
6275 }
6276 }
6277
6278#ifdef IEM_WITH_DATA_TLB
6279 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6280
6281 /*
6282 * Get the TLB entry for this page.
6283 */
6284 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6285 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6286 if (pTlbe->uTag == uTag)
6287 {
6288# ifdef VBOX_WITH_STATISTICS
6289 pVCpu->iem.s.DataTlb.cTlbHits++;
6290# endif
6291 }
6292 else
6293 {
6294 pVCpu->iem.s.DataTlb.cTlbMisses++;
6295 PGMPTWALK Walk;
6296 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6297 if (RT_FAILURE(rc))
6298 {
6299 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6300# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6301 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6302 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6303# endif
6304 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6305 }
6306
6307 Assert(Walk.fSucceeded);
6308 pTlbe->uTag = uTag;
6309 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6310 pTlbe->GCPhys = Walk.GCPhys;
6311 pTlbe->pbMappingR3 = NULL;
6312 }
6313
6314 /*
6315 * Check TLB page table level access flags.
6316 */
6317 /* If the page is either supervisor only or non-writable, we need to do
6318 more careful access checks. */
6319 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6320 {
6321 /* Write to read only memory? */
6322 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6323 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6324 && ( ( IEM_GET_CPL(pVCpu) == 3
6325 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6326 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6327 {
6328 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6329# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6330 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6331 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6332# endif
6333 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6334 }
6335
6336 /* Kernel memory accessed by userland? */
6337 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6338 && IEM_GET_CPL(pVCpu) == 3
6339 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6340 {
6341 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6342# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6343 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6344 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6345# endif
6346 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6347 }
6348 }
6349
6350 /*
6351 * Set the dirty / access flags.
6352 * ASSUMES this is set when the address is translated rather than on commit...
6353 */
6354 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6355 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6356 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6357 {
6358 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6359 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6360 AssertRC(rc2);
6361 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6362 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6363 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6364 }
6365
6366 /*
6367 * Look up the physical page info if necessary.
6368 */
6369 uint8_t *pbMem = NULL;
6370 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6371# ifdef IN_RING3
6372 pbMem = pTlbe->pbMappingR3;
6373# else
6374 pbMem = NULL;
6375# endif
6376 else
6377 {
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6379 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6380 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6381 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6382 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6383 { /* likely */ }
6384 else
6385 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6386 pTlbe->pbMappingR3 = NULL;
6387 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6388 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6389 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6390 &pbMem, &pTlbe->fFlagsAndPhysRev);
6391 AssertRCReturn(rc, rc);
6392# ifdef IN_RING3
6393 pTlbe->pbMappingR3 = pbMem;
6394# endif
6395 }
6396
6397 /*
6398 * Check the physical page level access and mapping.
6399 */
6400 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6401 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6402 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6403 { /* probably likely */ }
6404 else
6405 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6406 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6407 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6408 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6409 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6410 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6411
6412 if (pbMem)
6413 {
6414 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6415 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6416 fAccess |= IEM_ACCESS_NOT_LOCKED;
6417 }
6418 else
6419 {
6420 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6421 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6422 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6423 if (rcStrict != VINF_SUCCESS)
6424 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6425 }
6426
6427 void * const pvMem = pbMem;
6428
6429 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6430 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6431 if (fAccess & IEM_ACCESS_TYPE_READ)
6432 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6433
6434#else /* !IEM_WITH_DATA_TLB */
6435
6436 RTGCPHYS GCPhysFirst;
6437 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6438 if (rcStrict != VINF_SUCCESS)
6439 return rcStrict;
6440
6441 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6442 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6443 if (fAccess & IEM_ACCESS_TYPE_READ)
6444 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6445
6446 void *pvMem;
6447 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6448 if (rcStrict != VINF_SUCCESS)
6449 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6450
6451#endif /* !IEM_WITH_DATA_TLB */
6452
6453 /*
6454 * Fill in the mapping table entry.
6455 */
6456 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6457 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6458 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6459 pVCpu->iem.s.cActiveMappings += 1;
6460
6461 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6462 *ppvMem = pvMem;
6463
6464 return VINF_SUCCESS;
6465}
6466
6467
6468/**
6469 * Commits the guest memory if bounce buffered and unmaps it.
6470 *
6471 * @returns Strict VBox status code.
6472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6473 * @param pvMem The mapping.
6474 * @param fAccess The kind of access.
6475 */
6476VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6477{
6478 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6479 AssertReturn(iMemMap >= 0, iMemMap);
6480
6481 /* If it's bounce buffered, we may need to write back the buffer. */
6482 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6483 {
6484 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6485 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6486 }
6487 /* Otherwise unlock it. */
6488 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6489 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6490
6491 /* Free the entry. */
6492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6493 Assert(pVCpu->iem.s.cActiveMappings != 0);
6494 pVCpu->iem.s.cActiveMappings--;
6495 return VINF_SUCCESS;
6496}
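/*
 * Note: A typical caller pairs iemMemMap with iemMemCommitAndUnmap roughly as
 * sketched below. This is an illustrative sketch only (uValue is a made-up
 * local); the data fetch helpers further down, e.g. iemMemFetchDataU16, are
 * the canonical examples.
 *
 * uint16_t const *pu16Src;
 * VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg,
 * GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 * if (rcStrict == VINF_SUCCESS)
 * {
 * uint16_t const uValue = *pu16Src;
 * rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 * }
 */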
6497
6498#ifdef IEM_WITH_SETJMP
6499
6500/**
6501 * Maps the specified guest memory for the given kind of access, longjmp on
6502 * error.
6503 *
6504 * This may be using bounce buffering of the memory if it's crossing a page
6505 * boundary or if there is an access handler installed for any of it. Because
6506 * of lock prefix guarantees, we're in for some extra clutter when this
6507 * happens.
6508 *
6509 * This may raise a \#GP, \#SS, \#PF or \#AC.
6510 *
6511 * @returns Pointer to the mapped memory.
6512 *
6513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6514 * @param cbMem The number of bytes to map. This is usually 1,
6515 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6516 * string operations it can be up to a page.
6517 * @param iSegReg The index of the segment register to use for
6518 * this access. The base and limits are checked.
6519 * Use UINT8_MAX to indicate that no segmentation
6520 * is required (for IDT, GDT and LDT accesses).
6521 * @param GCPtrMem The address of the guest memory.
6522 * @param fAccess How the memory is being accessed. The
6523 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6524 * how to map the memory, while the
6525 * IEM_ACCESS_WHAT_XXX bit is used when raising
6526 * exceptions.
6527 * @param uAlignCtl Alignment control:
6528 * - Bits 15:0 is the alignment mask.
6529 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6530 * IEM_MEMMAP_F_ALIGN_SSE, and
6531 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6532 * Pass zero to skip alignment.
6533 */
6534void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6535 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6536{
6537 /*
6538 * Check the input, check segment access and adjust address
6539 * with segment base.
6540 */
6541 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6542 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6543 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6544
6545 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6546 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6547 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6548
6549 /*
6550 * Alignment check.
6551 */
6552 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6553 { /* likelyish */ }
6554 else
6555 {
6556 /* Misaligned access. */
6557 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6558 {
6559 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6560 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6561 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6562 {
6563 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6564
6565 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6566 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6567 }
6568 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6569 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6570 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6571 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6572 * that's what FXSAVE does on a 10980xe. */
6573 && iemMemAreAlignmentChecksEnabled(pVCpu))
6574 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6575 else
6576 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6577 }
6578 }
6579
6580 /*
6581 * Figure out which mapping entry to use.
6582 */
6583 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6584 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6585 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6586 {
6587 iMemMap = iemMemMapFindFree(pVCpu);
6588 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6589 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6590 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6591 pVCpu->iem.s.aMemMappings[2].fAccess),
6592 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6593 }
6594
6595 /*
6596 * Crossing a page boundary?
6597 */
6598 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6599 { /* No (likely). */ }
6600 else
6601 {
6602 void *pvMem;
6603 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6604 if (rcStrict == VINF_SUCCESS)
6605 return pvMem;
6606 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6607 }
6608
6609#ifdef IEM_WITH_DATA_TLB
6610 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6611
6612 /*
6613 * Get the TLB entry for this page.
6614 */
6615 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6616 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6617 if (pTlbe->uTag == uTag)
6618 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6619 else
6620 {
6621 pVCpu->iem.s.DataTlb.cTlbMisses++;
6622 PGMPTWALK Walk;
6623 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6624 if (RT_FAILURE(rc))
6625 {
6626 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6627# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6628 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6629 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6630# endif
6631 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6632 }
6633
6634 Assert(Walk.fSucceeded);
6635 pTlbe->uTag = uTag;
6636 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6637 pTlbe->GCPhys = Walk.GCPhys;
6638 pTlbe->pbMappingR3 = NULL;
6639 }
6640
6641 /*
6642 * Check the flags and physical revision.
6643 */
6644 /** @todo make the caller pass these in with fAccess. */
6645 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6646 ? IEMTLBE_F_PT_NO_USER : 0;
6647 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6648 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6649 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6650 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6651 ? IEMTLBE_F_PT_NO_WRITE : 0)
6652 : 0;
6653 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6654 uint8_t *pbMem = NULL;
6655 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6656 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6657# ifdef IN_RING3
6658 pbMem = pTlbe->pbMappingR3;
6659# else
6660 pbMem = NULL;
6661# endif
6662 else
6663 {
6664 /*
6665 * Okay, something isn't quite right or needs refreshing.
6666 */
6667 /* Write to read only memory? */
6668 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6669 {
6670 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6671# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6674# endif
6675 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6676 }
6677
6678 /* Kernel memory accessed by userland? */
6679 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6680 {
6681 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6682# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6683 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6684 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6685# endif
6686 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6687 }
6688
6689 /* Set the dirty / access flags.
6690 ASSUMES this is set when the address is translated rather than on commit... */
6691 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6692 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6693 {
6694 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6695 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6696 AssertRC(rc2);
6697 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6698 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6699 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6700 }
6701
6702 /*
6703 * Check if the physical page info needs updating.
6704 */
6705 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6706# ifdef IN_RING3
6707 pbMem = pTlbe->pbMappingR3;
6708# else
6709 pbMem = NULL;
6710# endif
6711 else
6712 {
6713 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6714 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6716 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6717 pTlbe->pbMappingR3 = NULL;
6718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6721 &pbMem, &pTlbe->fFlagsAndPhysRev);
6722 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6723# ifdef IN_RING3
6724 pTlbe->pbMappingR3 = pbMem;
6725# endif
6726 }
6727
6728 /*
6729 * Check the physical page level access and mapping.
6730 */
6731 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6732 { /* probably likely */ }
6733 else
6734 {
6735 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6736 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6737 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6738 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6739 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6740 if (rcStrict == VINF_SUCCESS)
6741 return pbMem;
6742 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6743 }
6744 }
6745 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6746
6747 if (pbMem)
6748 {
6749 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6750 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6751 fAccess |= IEM_ACCESS_NOT_LOCKED;
6752 }
6753 else
6754 {
6755 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6756 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6757 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6758 if (rcStrict == VINF_SUCCESS)
6759 return pbMem;
6760 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6761 }
6762
6763 void * const pvMem = pbMem;
6764
6765 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6766 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6767 if (fAccess & IEM_ACCESS_TYPE_READ)
6768 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6769
6770#else /* !IEM_WITH_DATA_TLB */
6771
6772
6773 RTGCPHYS GCPhysFirst;
6774 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6775 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6776 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6777
6778 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6779 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6780 if (fAccess & IEM_ACCESS_TYPE_READ)
6781 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6782
6783 void *pvMem;
6784 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6785 if (rcStrict == VINF_SUCCESS)
6786 { /* likely */ }
6787 else
6788 {
6789 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6790 if (rcStrict == VINF_SUCCESS)
6791 return pvMem;
6792 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6793 }
6794
6795#endif /* !IEM_WITH_DATA_TLB */
6796
6797 /*
6798 * Fill in the mapping table entry.
6799 */
6800 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6801 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6802 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6803 pVCpu->iem.s.cActiveMappings++;
6804
6805 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6806 return pvMem;
6807}
6808
6809
6810/**
6811 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6812 *
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param pvMem The mapping.
6815 * @param fAccess The kind of access.
6816 */
6817void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6818{
6819 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6820 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6821
6822 /* If it's bounce buffered, we may need to write back the buffer. */
6823 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6824 {
6825 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6826 {
6827 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6828 if (rcStrict == VINF_SUCCESS)
6829 return;
6830 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6831 }
6832 }
6833 /* Otherwise unlock it. */
6834 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6835 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6836
6837 /* Free the entry. */
6838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6839 Assert(pVCpu->iem.s.cActiveMappings != 0);
6840 pVCpu->iem.s.cActiveMappings--;
6841}
6842
6843#endif /* IEM_WITH_SETJMP */
6844
6845#ifndef IN_RING3
6846/**
6847 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6848 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
6849 *
6850 * Allows the instruction to be completed and retired, while the IEM user will
6851 * return to ring-3 immediately afterwards and do the postponed writes there.
6852 *
6853 * @returns VBox status code (no strict statuses). Caller must check
6854 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param pvMem The mapping.
6857 * @param fAccess The kind of access.
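 *
 * A caller driving a string instruction is assumed to check along these lines
 * (illustrative sketch only, not lifted from an actual caller):
 *
 * rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, fAccess);
 * if (rcStrict == VINF_SUCCESS && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 * // then it is safe to do another iteration before returning to ring-3.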
6858 */
6859VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6860{
6861 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6862 AssertReturn(iMemMap >= 0, iMemMap);
6863
6864 /* If it's bounce buffered, we may need to write back the buffer. */
6865 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6866 {
6867 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6868 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6869 }
6870 /* Otherwise unlock it. */
6871 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6872 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6873
6874 /* Free the entry. */
6875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6876 Assert(pVCpu->iem.s.cActiveMappings != 0);
6877 pVCpu->iem.s.cActiveMappings--;
6878 return VINF_SUCCESS;
6879}
6880#endif
6881
6882
6883/**
6884 * Rolls back mappings, releasing page locks and such.
6885 *
6886 * The caller shall only call this after checking cActiveMappings.
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 */
6890void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6891{
6892 Assert(pVCpu->iem.s.cActiveMappings > 0);
6893
6894 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6895 while (iMemMap-- > 0)
6896 {
6897 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6898 if (fAccess != IEM_ACCESS_INVALID)
6899 {
6900 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6902 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6903 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6904 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6905 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6906 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6907 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6908 pVCpu->iem.s.cActiveMappings--;
6909 }
6910 }
6911}
6912
6913
6914/**
6915 * Fetches a data byte.
6916 *
6917 * @returns Strict VBox status code.
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 * @param pu8Dst Where to return the byte.
6920 * @param iSegReg The index of the segment register to use for
6921 * this access. The base and limits are checked.
6922 * @param GCPtrMem The address of the guest memory.
6923 */
6924VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6925{
6926 /* The lazy approach for now... */
6927 uint8_t const *pu8Src;
6928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6929 if (rc == VINF_SUCCESS)
6930 {
6931 *pu8Dst = *pu8Src;
6932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6933 }
6934 return rc;
6935}
6936
6937
6938#ifdef IEM_WITH_SETJMP
6939/**
6940 * Fetches a data byte, longjmp on error.
6941 *
6942 * @returns The byte.
6943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6944 * @param iSegReg The index of the segment register to use for
6945 * this access. The base and limits are checked.
6946 * @param GCPtrMem The address of the guest memory.
6947 */
6948uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6949{
6950 /* The lazy approach for now... */
6951 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6952 uint8_t const bRet = *pu8Src;
6953 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6954 return bRet;
6955}
6956#endif /* IEM_WITH_SETJMP */
6957
6958
6959/**
6960 * Fetches a data word.
6961 *
6962 * @returns Strict VBox status code.
6963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6964 * @param pu16Dst Where to return the word.
6965 * @param iSegReg The index of the segment register to use for
6966 * this access. The base and limits are checked.
6967 * @param GCPtrMem The address of the guest memory.
6968 */
6969VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6970{
6971 /* The lazy approach for now... */
6972 uint16_t const *pu16Src;
6973 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6974 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6975 if (rc == VINF_SUCCESS)
6976 {
6977 *pu16Dst = *pu16Src;
6978 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6979 }
6980 return rc;
6981}
6982
6983
6984#ifdef IEM_WITH_SETJMP
6985/**
6986 * Fetches a data word, longjmp on error.
6987 *
6988 * @returns The word.
6989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6990 * @param iSegReg The index of the segment register to use for
6991 * this access. The base and limits are checked.
6992 * @param GCPtrMem The address of the guest memory.
6993 */
6994uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6995{
6996 /* The lazy approach for now... */
6997 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6998 sizeof(*pu16Src) - 1);
6999 uint16_t const u16Ret = *pu16Src;
7000 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7001 return u16Ret;
7002}
7003#endif
7004
7005
7006/**
7007 * Fetches a data dword.
7008 *
7009 * @returns Strict VBox status code.
7010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7011 * @param pu32Dst Where to return the dword.
7012 * @param iSegReg The index of the segment register to use for
7013 * this access. The base and limits are checked.
7014 * @param GCPtrMem The address of the guest memory.
7015 */
7016VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7017{
7018 /* The lazy approach for now... */
7019 uint32_t const *pu32Src;
7020 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7021 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7022 if (rc == VINF_SUCCESS)
7023 {
7024 *pu32Dst = *pu32Src;
7025 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7026 }
7027 return rc;
7028}
7029
7030
7031/**
7032 * Fetches a data dword and zero extends it to a qword.
7033 *
7034 * @returns Strict VBox status code.
7035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7036 * @param pu64Dst Where to return the qword.
7037 * @param iSegReg The index of the segment register to use for
7038 * this access. The base and limits are checked.
7039 * @param GCPtrMem The address of the guest memory.
7040 */
7041VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7042{
7043 /* The lazy approach for now... */
7044 uint32_t const *pu32Src;
7045 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7046 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7047 if (rc == VINF_SUCCESS)
7048 {
7049 *pu64Dst = *pu32Src;
7050 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7051 }
7052 return rc;
7053}
7054
7055
7056#ifdef IEM_WITH_SETJMP
7057
7058/**
7059 * Fetches a data dword, longjmp on error, fallback/safe version.
7060 *
7061 * @returns The dword.
7062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7063 * @param iSegReg The index of the segment register to use for
7064 * this access. The base and limits are checked.
7065 * @param GCPtrMem The address of the guest memory.
7066 */
7067uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7068{
7069 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7070 sizeof(*pu32Src) - 1);
7071 uint32_t const u32Ret = *pu32Src;
7072 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7073 return u32Ret;
7074}
7075
7076
7077/**
7078 * Fetches a data dword, longjmp on error.
7079 *
7080 * @returns The dword.
7081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7082 * @param iSegReg The index of the segment register to use for
7083 * this access. The base and limits are checked.
7084 * @param GCPtrMem The address of the guest memory.
7085 */
7086uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7087{
7088# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7089 /*
7090 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7091 */
7092 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7093 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7094 {
7095 /*
7096 * TLB lookup.
7097 */
7098 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7099 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7100 if (pTlbe->uTag == uTag)
7101 {
7102 /*
7103 * Check TLB page table level access flags.
7104 */
7105 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7106 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7107 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7108 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7109 {
7110 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7111
7112 /*
7113 * Alignment check:
7114 */
7115 /** @todo check priority \#AC vs \#PF */
7116 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7117 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7118 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7119 || IEM_GET_CPL(pVCpu) != 3)
7120 {
7121 /*
7122 * Fetch and return the dword
7123 */
7124 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7125 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7126 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7127 }
7128 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7129 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7130 }
7131 }
7132 }
7133
7134 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7135 outdated page pointer, or other troubles. */
7136 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7137 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7138
7139# else
7140 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7141 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7142 uint32_t const u32Ret = *pu32Src;
7143 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7144 return u32Ret;
7145# endif
7146}
7147#endif
7148
7149
7150#ifdef SOME_UNUSED_FUNCTION
7151/**
7152 * Fetches a data dword and sign extends it to a qword.
7153 *
7154 * @returns Strict VBox status code.
7155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7156 * @param pu64Dst Where to return the sign extended value.
7157 * @param iSegReg The index of the segment register to use for
7158 * this access. The base and limits are checked.
7159 * @param GCPtrMem The address of the guest memory.
7160 */
7161VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7162{
7163 /* The lazy approach for now... */
7164 int32_t const *pi32Src;
7165 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7166 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7167 if (rc == VINF_SUCCESS)
7168 {
7169 *pu64Dst = *pi32Src;
7170 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7171 }
7172#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7173 else
7174 *pu64Dst = 0;
7175#endif
7176 return rc;
7177}
7178#endif
7179
7180
7181/**
7182 * Fetches a data qword.
7183 *
7184 * @returns Strict VBox status code.
7185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7186 * @param pu64Dst Where to return the qword.
7187 * @param iSegReg The index of the segment register to use for
7188 * this access. The base and limits are checked.
7189 * @param GCPtrMem The address of the guest memory.
7190 */
7191VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7192{
7193 /* The lazy approach for now... */
7194 uint64_t const *pu64Src;
7195 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7196 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7197 if (rc == VINF_SUCCESS)
7198 {
7199 *pu64Dst = *pu64Src;
7200 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7201 }
7202 return rc;
7203}
7204
7205
7206#ifdef IEM_WITH_SETJMP
7207/**
7208 * Fetches a data qword, longjmp on error.
7209 *
7210 * @returns The qword.
7211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7212 * @param iSegReg The index of the segment register to use for
7213 * this access. The base and limits are checked.
7214 * @param GCPtrMem The address of the guest memory.
7215 */
7216uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7217{
7218 /* The lazy approach for now... */
7219 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7220 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7221 uint64_t const u64Ret = *pu64Src;
7222 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7223 return u64Ret;
7224}
7225#endif
7226
7227
7228/**
7229 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7230 *
7231 * @returns Strict VBox status code.
7232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7233 * @param pu64Dst Where to return the qword.
7234 * @param iSegReg The index of the segment register to use for
7235 * this access. The base and limits are checked.
7236 * @param GCPtrMem The address of the guest memory.
7237 */
7238VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7239{
7240 /* The lazy approach for now... */
7241 uint64_t const *pu64Src;
7242 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7243 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7244 if (rc == VINF_SUCCESS)
7245 {
7246 *pu64Dst = *pu64Src;
7247 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7248 }
7249 return rc;
7250}
7251
7252
7253#ifdef IEM_WITH_SETJMP
7254/**
7255 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7256 *
7257 * @returns The qword.
7258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7259 * @param iSegReg The index of the segment register to use for
7260 * this access. The base and limits are checked.
7261 * @param GCPtrMem The address of the guest memory.
7262 */
7263uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7264{
7265 /* The lazy approach for now... */
7266 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7267 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7268 uint64_t const u64Ret = *pu64Src;
7269 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7270 return u64Ret;
7271}
7272#endif
7273
7274
7275/**
7276 * Fetches a data tword.
7277 *
7278 * @returns Strict VBox status code.
7279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7280 * @param pr80Dst Where to return the tword.
7281 * @param iSegReg The index of the segment register to use for
7282 * this access. The base and limits are checked.
7283 * @param GCPtrMem The address of the guest memory.
7284 */
7285VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7286{
7287 /* The lazy approach for now... */
7288 PCRTFLOAT80U pr80Src;
7289 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7290 if (rc == VINF_SUCCESS)
7291 {
7292 *pr80Dst = *pr80Src;
7293 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7294 }
7295 return rc;
7296}
7297
7298
7299#ifdef IEM_WITH_SETJMP
7300/**
7301 * Fetches a data tword, longjmp on error.
7302 *
7303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7304 * @param pr80Dst Where to return the tword.
7305 * @param iSegReg The index of the segment register to use for
7306 * this access. The base and limits are checked.
7307 * @param GCPtrMem The address of the guest memory.
7308 */
7309void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7310{
7311 /* The lazy approach for now... */
7312 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7313 *pr80Dst = *pr80Src;
7314 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7315}
7316#endif
7317
7318
7319/**
7320 * Fetches a data decimal tword.
7321 *
7322 * @returns Strict VBox status code.
7323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7324 * @param pd80Dst Where to return the tword.
7325 * @param iSegReg The index of the segment register to use for
7326 * this access. The base and limits are checked.
7327 * @param GCPtrMem The address of the guest memory.
7328 */
7329VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7330{
7331 /* The lazy approach for now... */
7332 PCRTPBCD80U pd80Src;
7333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7334 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7335 if (rc == VINF_SUCCESS)
7336 {
7337 *pd80Dst = *pd80Src;
7338 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7339 }
7340 return rc;
7341}
7342
7343
7344#ifdef IEM_WITH_SETJMP
7345/**
7346 * Fetches a data decimal tword, longjmp on error.
7347 *
7348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7349 * @param pd80Dst Where to return the tword.
7350 * @param iSegReg The index of the segment register to use for
7351 * this access. The base and limits are checked.
7352 * @param GCPtrMem The address of the guest memory.
7353 */
7354void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7355{
7356 /* The lazy approach for now... */
7357 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7358 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7359 *pd80Dst = *pd80Src;
7360 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7361}
7362#endif
7363
7364
7365/**
7366 * Fetches a data dqword (double qword), generally SSE related.
7367 *
7368 * @returns Strict VBox status code.
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param pu128Dst Where to return the dqword.
7371 * @param iSegReg The index of the segment register to use for
7372 * this access. The base and limits are checked.
7373 * @param GCPtrMem The address of the guest memory.
7374 */
7375VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7376{
7377 /* The lazy approach for now... */
7378 PCRTUINT128U pu128Src;
7379 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7380 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7381 if (rc == VINF_SUCCESS)
7382 {
7383 pu128Dst->au64[0] = pu128Src->au64[0];
7384 pu128Dst->au64[1] = pu128Src->au64[1];
7385 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7386 }
7387 return rc;
7388}
7389
7390
7391#ifdef IEM_WITH_SETJMP
7392/**
7393 * Fetches a data dqword (double qword), generally SSE related.
7394 *
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7396 * @param pu128Dst Where to return the dqword.
7397 * @param iSegReg The index of the segment register to use for
7398 * this access. The base and limits are checked.
7399 * @param GCPtrMem The address of the guest memory.
7400 */
7401void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7402{
7403 /* The lazy approach for now... */
7404 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7405 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7406 pu128Dst->au64[0] = pu128Src->au64[0];
7407 pu128Dst->au64[1] = pu128Src->au64[1];
7408 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7409}
7410#endif
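
/*
 * Illustrative only: how a caller might use the two fetch flavours above.
 * The trivial error handling and the use of X86_SREG_DS are assumptions made
 * for the sake of the example, not code lifted from the instruction emulation.
 *
 *      RTUINT128U uValue;
 *
 *      // Status code flavour: every step reports failure via VBOXSTRICTRC.
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uValue, X86_SREG_DS, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // Longjmp flavour (IEM_WITH_SETJMP): errors unwind via longjmp, so the
 *      // caller simply continues with the fetched value.
 *      iemMemFetchDataU128Jmp(pVCpu, &uValue, X86_SREG_DS, GCPtrMem);
 */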
7411
7412
7413/**
7414 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7415 * related.
7416 *
7417 * Raises \#GP(0) if not aligned.
7418 *
7419 * @returns Strict VBox status code.
7420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7421 * @param pu128Dst Where to return the dqword.
7422 * @param iSegReg The index of the segment register to use for
7423 * this access. The base and limits are checked.
7424 * @param GCPtrMem The address of the guest memory.
7425 */
7426VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7427{
7428 /* The lazy approach for now... */
7429 PCRTUINT128U pu128Src;
7430 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7431 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7432 if (rc == VINF_SUCCESS)
7433 {
7434 pu128Dst->au64[0] = pu128Src->au64[0];
7435 pu128Dst->au64[1] = pu128Src->au64[1];
7436 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7437 }
7438 return rc;
7439}
7440
7441
7442#ifdef IEM_WITH_SETJMP
7443/**
7444 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7445 * related, longjmp on error.
7446 *
7447 * Raises \#GP(0) if not aligned.
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param pu128Dst Where to return the dqword.
7451 * @param iSegReg The index of the segment register to use for
7452 * this access. The base and limits are checked.
7453 * @param GCPtrMem The address of the guest memory.
7454 */
7455void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7456 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7457{
7458 /* The lazy approach for now... */
7459 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7460 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7461 pu128Dst->au64[0] = pu128Src->au64[0];
7462 pu128Dst->au64[1] = pu128Src->au64[1];
7463 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7464}
7465#endif
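
/*
 * Sketch of how the final iemMemMap / iemMemMapJmp argument is composed in the
 * SSE-aligned variants above.  The interpretation is inferred from the flag
 * names and the surrounding comments; the exact semantics live with iemMemMap:
 *
 *      // 16-byte natural alignment mask plus "raise #GP(0) on misalignment
 *      // according to the SSE alignment rules" flags.
 *      uint32_t const fAlignCtl = (sizeof(RTUINT128U) - 1)        // 0x0f
 *                               | IEM_MEMMAP_F_ALIGN_GP
 *                               | IEM_MEMMAP_F_ALIGN_SSE;
 *
 * The unaligned variants pass 0 instead (the "NO_AC variant" in the comments),
 * i.e. no alignment checking is requested.
 */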
7466
7467
7468/**
7469 * Fetches a data oword (octo word), generally AVX related.
7470 *
7471 * @returns Strict VBox status code.
7472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7473 * @param pu256Dst Where to return the oword.
7474 * @param iSegReg The index of the segment register to use for
7475 * this access. The base and limits are checked.
7476 * @param GCPtrMem The address of the guest memory.
7477 */
7478VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7479{
7480 /* The lazy approach for now... */
7481 PCRTUINT256U pu256Src;
7482 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7483 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7484 if (rc == VINF_SUCCESS)
7485 {
7486 pu256Dst->au64[0] = pu256Src->au64[0];
7487 pu256Dst->au64[1] = pu256Src->au64[1];
7488 pu256Dst->au64[2] = pu256Src->au64[2];
7489 pu256Dst->au64[3] = pu256Src->au64[3];
7490 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7491 }
7492 return rc;
7493}
7494
7495
7496#ifdef IEM_WITH_SETJMP
7497/**
7498 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7499 *
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param pu256Dst Where to return the oword.
7502 * @param iSegReg The index of the segment register to use for
7503 * this access. The base and limits are checked.
7504 * @param GCPtrMem The address of the guest memory.
7505 */
7506void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7507{
7508 /* The lazy approach for now... */
7509 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7510 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7511 pu256Dst->au64[0] = pu256Src->au64[0];
7512 pu256Dst->au64[1] = pu256Src->au64[1];
7513 pu256Dst->au64[2] = pu256Src->au64[2];
7514 pu256Dst->au64[3] = pu256Src->au64[3];
7515 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7516}
7517#endif
7518
7519
7520/**
7521 * Fetches a data oword (octo word) at an aligned address, generally AVX
7522 * related.
7523 *
7524 * Raises \#GP(0) if not aligned.
7525 *
7526 * @returns Strict VBox status code.
7527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7528 * @param pu256Dst Where to return the oword.
7529 * @param iSegReg The index of the segment register to use for
7530 * this access. The base and limits are checked.
7531 * @param GCPtrMem The address of the guest memory.
7532 */
7533VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7534{
7535 /* The lazy approach for now... */
7536 PCRTUINT256U pu256Src;
7537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7538 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7539 if (rc == VINF_SUCCESS)
7540 {
7541 pu256Dst->au64[0] = pu256Src->au64[0];
7542 pu256Dst->au64[1] = pu256Src->au64[1];
7543 pu256Dst->au64[2] = pu256Src->au64[2];
7544 pu256Dst->au64[3] = pu256Src->au64[3];
7545 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7546 }
7547 return rc;
7548}
7549
7550
7551#ifdef IEM_WITH_SETJMP
7552/**
7553 * Fetches a data oword (octo word) at an aligned address, generally AVX
7554 * related, longjmp on error.
7555 *
7556 * Raises \#GP(0) if not aligned.
7557 *
7558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7559 * @param pu256Dst Where to return the oword.
7560 * @param iSegReg The index of the segment register to use for
7561 * this access. The base and limits are checked.
7562 * @param GCPtrMem The address of the guest memory.
7563 */
7564void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7565 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7566{
7567 /* The lazy approach for now... */
7568 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7569 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7570 pu256Dst->au64[0] = pu256Src->au64[0];
7571 pu256Dst->au64[1] = pu256Src->au64[1];
7572 pu256Dst->au64[2] = pu256Src->au64[2];
7573 pu256Dst->au64[3] = pu256Src->au64[3];
7574 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7575}
7576#endif
7577
7578
7579
7580/**
7581 * Fetches a descriptor register (lgdt, lidt).
7582 *
7583 * @returns Strict VBox status code.
7584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7585 * @param pcbLimit Where to return the limit.
7586 * @param pGCPtrBase Where to return the base.
7587 * @param iSegReg The index of the segment register to use for
7588 * this access. The base and limits are checked.
7589 * @param GCPtrMem The address of the guest memory.
7590 * @param enmOpSize The effective operand size.
7591 */
7592VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7593 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7594{
7595 /*
7596 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7597 * little special:
7598 * - The two reads are done separately.
7599 * - The operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7600 * - We suspect the 386 actually commits the limit before the base in
7601 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7602 * don't try to emulate this eccentric behavior, because it's not well
7603 * enough understood and rather hard to trigger.
7604 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7605 */
7606 VBOXSTRICTRC rcStrict;
7607 if (IEM_IS_64BIT_CODE(pVCpu))
7608 {
7609 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7610 if (rcStrict == VINF_SUCCESS)
7611 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7612 }
7613 else
7614 {
7615 uint32_t uTmp = 0; /* (Initialized to silence the Visual C++ "maybe used uninitialized" warning.) */
7616 if (enmOpSize == IEMMODE_32BIT)
7617 {
7618 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7619 {
7620 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7621 if (rcStrict == VINF_SUCCESS)
7622 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7623 }
7624 else
7625 {
7626 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7627 if (rcStrict == VINF_SUCCESS)
7628 {
7629 *pcbLimit = (uint16_t)uTmp;
7630 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7631 }
7632 }
7633 if (rcStrict == VINF_SUCCESS)
7634 *pGCPtrBase = uTmp;
7635 }
7636 else
7637 {
7638 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7639 if (rcStrict == VINF_SUCCESS)
7640 {
7641 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7642 if (rcStrict == VINF_SUCCESS)
7643 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7644 }
7645 }
7646 }
7647 return rcStrict;
7648}
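
/*
 * Worked example for the 16-bit operand size path above, with made-up values:
 * LGDT/LIDT with a 16-bit operand reads a 16-bit limit followed by a 32-bit
 * base of which only the low 24 bits are kept.
 *
 *      // Bytes at GCPtrMem: 12 34 78 56 34 12
 *      //   limit = 0x3412
 *      //   base  = 0x12345678 & UINT32_C(0x00ffffff) = 0x00345678
 */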
7649
7650
7651
7652/**
7653 * Stores a data byte.
7654 *
7655 * @returns Strict VBox status code.
7656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7657 * @param iSegReg The index of the segment register to use for
7658 * this access. The base and limits are checked.
7659 * @param GCPtrMem The address of the guest memory.
7660 * @param u8Value The value to store.
7661 */
7662VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7663{
7664 /* The lazy approach for now... */
7665 uint8_t *pu8Dst;
7666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7667 if (rc == VINF_SUCCESS)
7668 {
7669 *pu8Dst = u8Value;
7670 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7671 }
7672 return rc;
7673}
7674
7675
7676#ifdef IEM_WITH_SETJMP
7677/**
7678 * Stores a data byte, longjmp on error.
7679 *
7680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7681 * @param iSegReg The index of the segment register to use for
7682 * this access. The base and limits are checked.
7683 * @param GCPtrMem The address of the guest memory.
7684 * @param u8Value The value to store.
7685 */
7686void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7687{
7688 /* The lazy approach for now... */
7689 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7690 *pu8Dst = u8Value;
7691 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7692}
7693#endif
7694
7695
7696/**
7697 * Stores a data word.
7698 *
7699 * @returns Strict VBox status code.
7700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7701 * @param iSegReg The index of the segment register to use for
7702 * this access. The base and limits are checked.
7703 * @param GCPtrMem The address of the guest memory.
7704 * @param u16Value The value to store.
7705 */
7706VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7707{
7708 /* The lazy approach for now... */
7709 uint16_t *pu16Dst;
7710 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7711 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7712 if (rc == VINF_SUCCESS)
7713 {
7714 *pu16Dst = u16Value;
7715 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7716 }
7717 return rc;
7718}
7719
7720
7721#ifdef IEM_WITH_SETJMP
7722/**
7723 * Stores a data word, longjmp on error.
7724 *
7725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7726 * @param iSegReg The index of the segment register to use for
7727 * this access. The base and limits are checked.
7728 * @param GCPtrMem The address of the guest memory.
7729 * @param u16Value The value to store.
7730 */
7731void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7732{
7733 /* The lazy approach for now... */
7734 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7735 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7736 *pu16Dst = u16Value;
7737 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7738}
7739#endif
7740
7741
7742/**
7743 * Stores a data dword.
7744 *
7745 * @returns Strict VBox status code.
7746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7747 * @param iSegReg The index of the segment register to use for
7748 * this access. The base and limits are checked.
7749 * @param GCPtrMem The address of the guest memory.
7750 * @param u32Value The value to store.
7751 */
7752VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7753{
7754 /* The lazy approach for now... */
7755 uint32_t *pu32Dst;
7756 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7757 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7758 if (rc == VINF_SUCCESS)
7759 {
7760 *pu32Dst = u32Value;
7761 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7762 }
7763 return rc;
7764}
7765
7766
7767#ifdef IEM_WITH_SETJMP
7768/**
7769 * Stores a data dword, longjmp on error.
7770 *
7772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7773 * @param iSegReg The index of the segment register to use for
7774 * this access. The base and limits are checked.
7775 * @param GCPtrMem The address of the guest memory.
7776 * @param u32Value The value to store.
7777 */
7778void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7779{
7780 /* The lazy approach for now... */
7781 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7782 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7783 *pu32Dst = u32Value;
7784 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7785}
7786#endif
7787
7788
7789/**
7790 * Stores a data qword.
7791 *
7792 * @returns Strict VBox status code.
7793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7794 * @param iSegReg The index of the segment register to use for
7795 * this access. The base and limits are checked.
7796 * @param GCPtrMem The address of the guest memory.
7797 * @param u64Value The value to store.
7798 */
7799VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7800{
7801 /* The lazy approach for now... */
7802 uint64_t *pu64Dst;
7803 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7804 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7805 if (rc == VINF_SUCCESS)
7806 {
7807 *pu64Dst = u64Value;
7808 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7809 }
7810 return rc;
7811}
7812
7813
7814#ifdef IEM_WITH_SETJMP
7815/**
7816 * Stores a data qword, longjmp on error.
7817 *
7818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7819 * @param iSegReg The index of the segment register to use for
7820 * this access. The base and limits are checked.
7821 * @param GCPtrMem The address of the guest memory.
7822 * @param u64Value The value to store.
7823 */
7824void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7825{
7826 /* The lazy approach for now... */
7827 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7828 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7829 *pu64Dst = u64Value;
7830 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7831}
7832#endif
7833
7834
7835/**
7836 * Stores a data dqword.
7837 *
7838 * @returns Strict VBox status code.
7839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7840 * @param iSegReg The index of the segment register to use for
7841 * this access. The base and limits are checked.
7842 * @param GCPtrMem The address of the guest memory.
7843 * @param u128Value The value to store.
7844 */
7845VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7846{
7847 /* The lazy approach for now... */
7848 PRTUINT128U pu128Dst;
7849 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7850 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7851 if (rc == VINF_SUCCESS)
7852 {
7853 pu128Dst->au64[0] = u128Value.au64[0];
7854 pu128Dst->au64[1] = u128Value.au64[1];
7855 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7856 }
7857 return rc;
7858}
7859
7860
7861#ifdef IEM_WITH_SETJMP
7862/**
7863 * Stores a data dqword, longjmp on error.
7864 *
7865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7866 * @param iSegReg The index of the segment register to use for
7867 * this access. The base and limits are checked.
7868 * @param GCPtrMem The address of the guest memory.
7869 * @param u128Value The value to store.
7870 */
7871void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7872{
7873 /* The lazy approach for now... */
7874 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7875 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7876 pu128Dst->au64[0] = u128Value.au64[0];
7877 pu128Dst->au64[1] = u128Value.au64[1];
7878 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7879}
7880#endif
7881
7882
7883/**
7884 * Stores a data dqword, SSE aligned.
7885 *
7886 * @returns Strict VBox status code.
7887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7888 * @param iSegReg The index of the segment register to use for
7889 * this access. The base and limits are checked.
7890 * @param GCPtrMem The address of the guest memory.
7891 * @param u128Value The value to store.
7892 */
7893VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7894{
7895 /* The lazy approach for now... */
7896 PRTUINT128U pu128Dst;
7897 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7898 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7899 if (rc == VINF_SUCCESS)
7900 {
7901 pu128Dst->au64[0] = u128Value.au64[0];
7902 pu128Dst->au64[1] = u128Value.au64[1];
7903 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7904 }
7905 return rc;
7906}
7907
7908
7909#ifdef IEM_WITH_SETJMP
7910/**
7911 * Stores a data dqword, SSE aligned, longjmp on error.
7912 *
7914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7915 * @param iSegReg The index of the segment register to use for
7916 * this access. The base and limits are checked.
7917 * @param GCPtrMem The address of the guest memory.
7918 * @param u128Value The value to store.
7919 */
7920void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7921 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7922{
7923 /* The lazy approach for now... */
7924 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7925 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7926 pu128Dst->au64[0] = u128Value.au64[0];
7927 pu128Dst->au64[1] = u128Value.au64[1];
7928 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7929}
7930#endif
7931
7932
7933/**
7934 * Stores a data oword (octo word).
7935 *
7936 * @returns Strict VBox status code.
7937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7938 * @param iSegReg The index of the segment register to use for
7939 * this access. The base and limits are checked.
7940 * @param GCPtrMem The address of the guest memory.
7941 * @param pu256Value Pointer to the value to store.
7942 */
7943VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7944{
7945 /* The lazy approach for now... */
7946 PRTUINT256U pu256Dst;
7947 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7948 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7949 if (rc == VINF_SUCCESS)
7950 {
7951 pu256Dst->au64[0] = pu256Value->au64[0];
7952 pu256Dst->au64[1] = pu256Value->au64[1];
7953 pu256Dst->au64[2] = pu256Value->au64[2];
7954 pu256Dst->au64[3] = pu256Value->au64[3];
7955 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7956 }
7957 return rc;
7958}
7959
7960
7961#ifdef IEM_WITH_SETJMP
7962/**
7963 * Stores a data oword (octo word), longjmp on error.
7964 *
7965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7966 * @param iSegReg The index of the segment register to use for
7967 * this access. The base and limits are checked.
7968 * @param GCPtrMem The address of the guest memory.
7969 * @param pu256Value Pointer to the value to store.
7970 */
7971void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7972{
7973 /* The lazy approach for now... */
7974 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7975 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7976 pu256Dst->au64[0] = pu256Value->au64[0];
7977 pu256Dst->au64[1] = pu256Value->au64[1];
7978 pu256Dst->au64[2] = pu256Value->au64[2];
7979 pu256Dst->au64[3] = pu256Value->au64[3];
7980 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7981}
7982#endif
7983
7984
7985/**
7986 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7987 *
7988 * @returns Strict VBox status code.
7989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7990 * @param iSegReg The index of the segment register to use for
7991 * this access. The base and limits are checked.
7992 * @param GCPtrMem The address of the guest memory.
7993 * @param pu256Value Pointer to the value to store.
7994 */
7995VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7996{
7997 /* The lazy approach for now... */
7998 PRTUINT256U pu256Dst;
7999 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8000 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8001 if (rc == VINF_SUCCESS)
8002 {
8003 pu256Dst->au64[0] = pu256Value->au64[0];
8004 pu256Dst->au64[1] = pu256Value->au64[1];
8005 pu256Dst->au64[2] = pu256Value->au64[2];
8006 pu256Dst->au64[3] = pu256Value->au64[3];
8007 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8008 }
8009 return rc;
8010}
8011
8012
8013#ifdef IEM_WITH_SETJMP
8014/**
8015 * Stores a data oword (octo word), AVX \#GP(0) aligned, longjmp on error.
8016 *
8018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8019 * @param iSegReg The index of the segment register to use for
8020 * this access. The base and limits are checked.
8021 * @param GCPtrMem The address of the guest memory.
8022 * @param pu256Value Pointer to the value to store.
8023 */
8024void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8025 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8026{
8027 /* The lazy approach for now... */
8028 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8029 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8030 pu256Dst->au64[0] = pu256Value->au64[0];
8031 pu256Dst->au64[1] = pu256Value->au64[1];
8032 pu256Dst->au64[2] = pu256Value->au64[2];
8033 pu256Dst->au64[3] = pu256Value->au64[3];
8034 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8035}
8036#endif
8037
8038
8039/**
8040 * Stores a descriptor register (sgdt, sidt).
8041 *
8042 * @returns Strict VBox status code.
8043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8044 * @param cbLimit The limit.
8045 * @param GCPtrBase The base address.
8046 * @param iSegReg The index of the segment register to use for
8047 * this access. The base and limits are checked.
8048 * @param GCPtrMem The address of the guest memory.
8049 */
8050VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8051{
8052 /*
8053 * The SIDT and SGDT instructions actually store the data using two
8054 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8055 * do not respond to operand size prefixes.
8056 */
8057 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8058 if (rcStrict == VINF_SUCCESS)
8059 {
8060 if (IEM_IS_16BIT_CODE(pVCpu))
8061 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8062 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8063 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8064 else if (IEM_IS_32BIT_CODE(pVCpu))
8065 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8066 else
8067 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8068 }
8069 return rcStrict;
8070}
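
/*
 * Worked example for the 16-bit code path above, with a made-up base of
 * 0x00345678: SGDT/SIDT stores the 16-bit limit, then the 32-bit base with
 * the top byte forced to 0xff on a 286-or-older target CPU:
 *
 *      //   IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286:  stores 0xff345678
 *      //   later targets:                                  stores 0x00345678
 *
 * 32-bit code stores the base as-is and 64-bit code stores all 64 bits of it.
 */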
8071
8072
8073/**
8074 * Pushes a word onto the stack.
8075 *
8076 * @returns Strict VBox status code.
8077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8078 * @param u16Value The value to push.
8079 */
8080VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8081{
8082 /* Decrement the stack pointer. */
8083 uint64_t uNewRsp;
8084 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8085
8086 /* Write the word the lazy way. */
8087 uint16_t *pu16Dst;
8088 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8089 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8090 if (rc == VINF_SUCCESS)
8091 {
8092 *pu16Dst = u16Value;
8093 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8094 }
8095
8096 /* Commit the new RSP value unless an access handler made trouble. */
8097 if (rc == VINF_SUCCESS)
8098 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8099
8100 return rc;
8101}
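
/*
 * The stack push helpers follow the same basic pattern as the one above
 * (sketch only, error paths elided; cbItem stands for the 2/4/8 byte item
 * size, and the *Ex variants further below commit to the caller's temporary
 * RSP instead of the guest context):
 *
 *      uint64_t uNewRsp;
 *      RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, cbItem, &uNewRsp); // new, lower RSP
 *      // ... map the stack slot at GCPtrTop, write the value, commit & unmap ...
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp; // RSP is only committed on success
 */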
8102
8103
8104/**
8105 * Pushes a dword onto the stack.
8106 *
8107 * @returns Strict VBox status code.
8108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8109 * @param u32Value The value to push.
8110 */
8111VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8112{
8113 /* Decrement the stack pointer. */
8114 uint64_t uNewRsp;
8115 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8116
8117 /* Write the dword the lazy way. */
8118 uint32_t *pu32Dst;
8119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8120 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8121 if (rc == VINF_SUCCESS)
8122 {
8123 *pu32Dst = u32Value;
8124 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8125 }
8126
8127 /* Commit the new RSP value unless an access handler made trouble. */
8128 if (rc == VINF_SUCCESS)
8129 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8130
8131 return rc;
8132}
8133
8134
8135/**
8136 * Pushes a dword segment register value onto the stack.
8137 *
8138 * @returns Strict VBox status code.
8139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8140 * @param u32Value The value to push.
8141 */
8142VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8143{
8144 /* Decrement the stack pointer. */
8145 uint64_t uNewRsp;
8146 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8147
8148 /* The Intel docs talk about zero extending the selector register
8149 value. My actual Intel CPU here might be zero extending the value,
8150 but it still only writes the lower word... */
8151 /** @todo Test this on newer HW and on AMD and in 64-bit mode. Also test what
8152 * happens when crossing a page boundary: is the high word checked
8153 * for write accessibility or not? Probably it is. What about segment limits?
8154 * It appears this behavior is also shared with trap error codes.
8155 *
8156 * Docs indicate the behavior maybe changed in the Pentium or Pentium Pro. Check
8157 * on ancient hardware to find out when it actually did change. */
8158 uint16_t *pu16Dst;
8159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8160 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8161 if (rc == VINF_SUCCESS)
8162 {
8163 *pu16Dst = (uint16_t)u32Value;
8164 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8165 }
8166
8167 /* Commit the new RSP value unless an access handler made trouble. */
8168 if (rc == VINF_SUCCESS)
8169 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8170
8171 return rc;
8172}
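
/*
 * Illustration of the quirk handled above: PUSH of a segment register with a
 * 32-bit operand size moves RSP by four bytes, but only the low word of the
 * slot is written, which is presumably why the mapping is read-write rather
 * than write-only:
 *
 *      // RSP -= 4; [RSP] (word) = (uint16_t)u32Value; [RSP+2] left unchanged
 */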
8173
8174
8175/**
8176 * Pushes a qword onto the stack.
8177 *
8178 * @returns Strict VBox status code.
8179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8180 * @param u64Value The value to push.
8181 */
8182VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8183{
8184 /* Decrement the stack pointer. */
8185 uint64_t uNewRsp;
8186 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8187
8188 /* Write the qword the lazy way. */
8189 uint64_t *pu64Dst;
8190 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8191 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8192 if (rc == VINF_SUCCESS)
8193 {
8194 *pu64Dst = u64Value;
8195 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8196 }
8197
8198 /* Commit the new RSP value unless an access handler made trouble. */
8199 if (rc == VINF_SUCCESS)
8200 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8201
8202 return rc;
8203}
8204
8205
8206/**
8207 * Pops a word from the stack.
8208 *
8209 * @returns Strict VBox status code.
8210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8211 * @param pu16Value Where to store the popped value.
8212 */
8213VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8214{
8215 /* Increment the stack pointer. */
8216 uint64_t uNewRsp;
8217 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8218
8219 /* Read the word the lazy way. */
8220 uint16_t const *pu16Src;
8221 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8222 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8223 if (rc == VINF_SUCCESS)
8224 {
8225 *pu16Value = *pu16Src;
8226 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8227
8228 /* Commit the new RSP value. */
8229 if (rc == VINF_SUCCESS)
8230 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8231 }
8232
8233 return rc;
8234}
8235
8236
8237/**
8238 * Pops a dword from the stack.
8239 *
8240 * @returns Strict VBox status code.
8241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8242 * @param pu32Value Where to store the popped value.
8243 */
8244VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8245{
8246 /* Increment the stack pointer. */
8247 uint64_t uNewRsp;
8248 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8249
8250 /* Read the dword the lazy way. */
8251 uint32_t const *pu32Src;
8252 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8253 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8254 if (rc == VINF_SUCCESS)
8255 {
8256 *pu32Value = *pu32Src;
8257 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8258
8259 /* Commit the new RSP value. */
8260 if (rc == VINF_SUCCESS)
8261 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8262 }
8263
8264 return rc;
8265}
8266
8267
8268/**
8269 * Pops a qword from the stack.
8270 *
8271 * @returns Strict VBox status code.
8272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8273 * @param pu64Value Where to store the popped value.
8274 */
8275VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8276{
8277 /* Increment the stack pointer. */
8278 uint64_t uNewRsp;
8279 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8280
8281 /* Read the qword the lazy way. */
8282 uint64_t const *pu64Src;
8283 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8284 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8285 if (rc == VINF_SUCCESS)
8286 {
8287 *pu64Value = *pu64Src;
8288 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8289
8290 /* Commit the new RSP value. */
8291 if (rc == VINF_SUCCESS)
8292 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8293 }
8294
8295 return rc;
8296}
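
/*
 * The pop helpers above mirror the push pattern (sketch only, error paths
 * elided; cbItem stands for the 2/4/8 byte item size, and the *Ex variants
 * below commit to the caller's temporary RSP instead):
 *
 *      uint64_t uNewRsp;
 *      RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, cbItem, &uNewRsp); // new, higher RSP
 *      // ... map the stack slot at GCPtrTop, copy out the value, commit & unmap ...
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp; // RSP is only committed on success
 */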
8297
8298
8299/**
8300 * Pushes a word onto the stack, using a temporary stack pointer.
8301 *
8302 * @returns Strict VBox status code.
8303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8304 * @param u16Value The value to push.
8305 * @param pTmpRsp Pointer to the temporary stack pointer.
8306 */
8307VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8308{
8309 /* Decrement the stack pointer. */
8310 RTUINT64U NewRsp = *pTmpRsp;
8311 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8312
8313 /* Write the word the lazy way. */
8314 uint16_t *pu16Dst;
8315 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8316 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8317 if (rc == VINF_SUCCESS)
8318 {
8319 *pu16Dst = u16Value;
8320 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8321 }
8322
8323 /* Commit the new RSP value unless an access handler made trouble. */
8324 if (rc == VINF_SUCCESS)
8325 *pTmpRsp = NewRsp;
8326
8327 return rc;
8328}
8329
8330
8331/**
8332 * Pushes a dword onto the stack, using a temporary stack pointer.
8333 *
8334 * @returns Strict VBox status code.
8335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8336 * @param u32Value The value to push.
8337 * @param pTmpRsp Pointer to the temporary stack pointer.
8338 */
8339VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8340{
8341 /* Decrement the stack pointer. */
8342 RTUINT64U NewRsp = *pTmpRsp;
8343 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8344
8345 /* Write the dword the lazy way. */
8346 uint32_t *pu32Dst;
8347 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8348 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8349 if (rc == VINF_SUCCESS)
8350 {
8351 *pu32Dst = u32Value;
8352 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8353 }
8354
8355 /* Commit the new RSP value unless an access handler made trouble. */
8356 if (rc == VINF_SUCCESS)
8357 *pTmpRsp = NewRsp;
8358
8359 return rc;
8360}
8361
8362
8363/**
8364 * Pushes a qword onto the stack, using a temporary stack pointer.
8365 *
8366 * @returns Strict VBox status code.
8367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8368 * @param u64Value The value to push.
8369 * @param pTmpRsp Pointer to the temporary stack pointer.
8370 */
8371VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8372{
8373 /* Decrement the stack pointer. */
8374 RTUINT64U NewRsp = *pTmpRsp;
8375 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8376
8377 /* Write the qword the lazy way. */
8378 uint64_t *pu64Dst;
8379 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8380 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8381 if (rc == VINF_SUCCESS)
8382 {
8383 *pu64Dst = u64Value;
8384 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8385 }
8386
8387 /* Commit the new RSP value unless an access handler made trouble. */
8388 if (rc == VINF_SUCCESS)
8389 *pTmpRsp = NewRsp;
8390
8391 return rc;
8392}
8393
8394
8395/**
8396 * Pops a word from the stack, using a temporary stack pointer.
8397 *
8398 * @returns Strict VBox status code.
8399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8400 * @param pu16Value Where to store the popped value.
8401 * @param pTmpRsp Pointer to the temporary stack pointer.
8402 */
8403VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8404{
8405 /* Increment the stack pointer. */
8406 RTUINT64U NewRsp = *pTmpRsp;
8407 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8408
8409 /* Read the word the lazy way. */
8410 uint16_t const *pu16Src;
8411 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8412 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8413 if (rc == VINF_SUCCESS)
8414 {
8415 *pu16Value = *pu16Src;
8416 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8417
8418 /* Commit the new RSP value. */
8419 if (rc == VINF_SUCCESS)
8420 *pTmpRsp = NewRsp;
8421 }
8422
8423 return rc;
8424}
8425
8426
8427/**
8428 * Pops a dword from the stack, using a temporary stack pointer.
8429 *
8430 * @returns Strict VBox status code.
8431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8432 * @param pu32Value Where to store the popped value.
8433 * @param pTmpRsp Pointer to the temporary stack pointer.
8434 */
8435VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8436{
8437 /* Increment the stack pointer. */
8438 RTUINT64U NewRsp = *pTmpRsp;
8439 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8440
8441 /* Read the dword the lazy way. */
8442 uint32_t const *pu32Src;
8443 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8444 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8445 if (rc == VINF_SUCCESS)
8446 {
8447 *pu32Value = *pu32Src;
8448 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8449
8450 /* Commit the new RSP value. */
8451 if (rc == VINF_SUCCESS)
8452 *pTmpRsp = NewRsp;
8453 }
8454
8455 return rc;
8456}
8457
8458
8459/**
8460 * Pops a qword from the stack, using a temporary stack pointer.
8461 *
8462 * @returns Strict VBox status code.
8463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8464 * @param pu64Value Where to store the popped value.
8465 * @param pTmpRsp Pointer to the temporary stack pointer.
8466 */
8467VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8468{
8469 /* Increment the stack pointer. */
8470 RTUINT64U NewRsp = *pTmpRsp;
8471 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8472
8473 /* Read the qword the lazy way. */
8474 uint64_t const *pu64Src;
8475 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8476 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8477 if (rcStrict == VINF_SUCCESS)
8478 {
8479 *pu64Value = *pu64Src;
8480 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8481
8482 /* Commit the new RSP value. */
8483 if (rcStrict == VINF_SUCCESS)
8484 *pTmpRsp = NewRsp;
8485 }
8486
8487 return rcStrict;
8488}
8489
8490
8491/**
8492 * Begin a special stack push (used by interrupts, exceptions and such).
8493 *
8494 * This will raise \#SS or \#PF if appropriate.
8495 *
8496 * @returns Strict VBox status code.
8497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8498 * @param cbMem The number of bytes to push onto the stack.
8499 * @param cbAlign The alignment mask (7, 3, 1).
8500 * @param ppvMem Where to return the pointer to the stack memory.
8501 * As with the other memory functions, this could be
8502 * direct access or bounce buffered access, so
8503 * don't commit any register state until the commit
8504 * call succeeds.
8505 * @param puNewRsp Where to return the new RSP value. This must be
8506 * passed unchanged to
8507 * iemMemStackPushCommitSpecial().
8508 */
8509VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8510 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8511{
8512 Assert(cbMem < UINT8_MAX);
8513 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8514 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8515 IEM_ACCESS_STACK_W, cbAlign);
8516}
8517
8518
8519/**
8520 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8521 *
8522 * This will update the rSP.
8523 *
8524 * @returns Strict VBox status code.
8525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8526 * @param pvMem The pointer returned by
8527 * iemMemStackPushBeginSpecial().
8528 * @param uNewRsp The new RSP value returned by
8529 * iemMemStackPushBeginSpecial().
8530 */
8531VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8532{
8533 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8534 if (rcStrict == VINF_SUCCESS)
8535 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8536 return rcStrict;
8537}
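
/*
 * Typical pairing of the two special push helpers above (sketch only; cbFrame
 * and cbAlign are placeholders, and real exception/interrupt code performs
 * additional checks):
 *
 *      void    *pvStackFrame;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbFrame, cbAlign,
 *                                                          &pvStackFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... fill in the frame via pvStackFrame ...
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
 */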
8538
8539
8540/**
8541 * Begin a special stack pop (used by iret, retf and such).
8542 *
8543 * This will raise \#SS or \#PF if appropriate.
8544 *
8545 * @returns Strict VBox status code.
8546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8547 * @param cbMem The number of bytes to pop from the stack.
8548 * @param cbAlign The alignment mask (7, 3, 1).
8549 * @param ppvMem Where to return the pointer to the stack memory.
8550 * @param puNewRsp Where to return the new RSP value. This must be
8551 * assigned to CPUMCTX::rsp manually some time
8552 * after iemMemStackPopDoneSpecial() has been
8553 * called.
8554 */
8555VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8556 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8557{
8558 Assert(cbMem < UINT8_MAX);
8559 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8560 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8561}
8562
8563
8564/**
8565 * Continue a special stack pop (used by iret and retf), for the purpose of
8566 * retrieving a new stack pointer.
8567 *
8568 * This will raise \#SS or \#PF if appropriate.
8569 *
8570 * @returns Strict VBox status code.
8571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8572 * @param off Offset from the top of the stack. This is zero
8573 * except in the retf case.
8574 * @param cbMem The number of bytes to pop from the stack.
8575 * @param ppvMem Where to return the pointer to the stack memory.
8576 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8577 * return this because all use of this function is
8578 * to retrieve a new value and anything we return
8579 * here would be discarded.)
8580 */
8581VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8582 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8583{
8584 Assert(cbMem < UINT8_MAX);
8585
8586 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8587 RTGCPTR GCPtrTop;
8588 if (IEM_IS_64BIT_CODE(pVCpu))
8589 GCPtrTop = uCurNewRsp;
8590 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8591 GCPtrTop = (uint32_t)uCurNewRsp;
8592 else
8593 GCPtrTop = (uint16_t)uCurNewRsp;
8594
8595 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8596 0 /* checked in iemMemStackPopBeginSpecial */);
8597}
8598
8599
8600/**
8601 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8602 * iemMemStackPopContinueSpecial).
8603 *
8604 * The caller will manually commit the rSP.
8605 *
8606 * @returns Strict VBox status code.
8607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8608 * @param pvMem The pointer returned by
8609 * iemMemStackPopBeginSpecial() or
8610 * iemMemStackPopContinueSpecial().
8611 */
8612VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8613{
8614 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8615}
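
/*
 * Typical pairing of the special pop helpers above for an iret/retf style
 * sequence (sketch only; cbFrame and cbAlign are placeholders):
 *
 *      void const *pvFrame;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbFrame, cbAlign,
 *                                                         &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... read the frame ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp; // the caller commits RSP manually
 */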
8616
8617
8618/**
8619 * Fetches a system table byte.
8620 *
8621 * @returns Strict VBox status code.
8622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8623 * @param pbDst Where to return the byte.
8624 * @param iSegReg The index of the segment register to use for
8625 * this access. The base and limits are checked.
8626 * @param GCPtrMem The address of the guest memory.
8627 */
8628VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8629{
8630 /* The lazy approach for now... */
8631 uint8_t const *pbSrc;
8632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8633 if (rc == VINF_SUCCESS)
8634 {
8635 *pbDst = *pbSrc;
8636 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8637 }
8638 return rc;
8639}
8640
8641
8642/**
8643 * Fetches a system table word.
8644 *
8645 * @returns Strict VBox status code.
8646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8647 * @param pu16Dst Where to return the word.
8648 * @param iSegReg The index of the segment register to use for
8649 * this access. The base and limits are checked.
8650 * @param GCPtrMem The address of the guest memory.
8651 */
8652VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8653{
8654 /* The lazy approach for now... */
8655 uint16_t const *pu16Src;
8656 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8657 if (rc == VINF_SUCCESS)
8658 {
8659 *pu16Dst = *pu16Src;
8660 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8661 }
8662 return rc;
8663}
8664
8665
8666/**
8667 * Fetches a system table dword.
8668 *
8669 * @returns Strict VBox status code.
8670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8671 * @param pu32Dst Where to return the dword.
8672 * @param iSegReg The index of the segment register to use for
8673 * this access. The base and limits are checked.
8674 * @param GCPtrMem The address of the guest memory.
8675 */
8676VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8677{
8678 /* The lazy approach for now... */
8679 uint32_t const *pu32Src;
8680 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8681 if (rc == VINF_SUCCESS)
8682 {
8683 *pu32Dst = *pu32Src;
8684 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8685 }
8686 return rc;
8687}
8688
8689
8690/**
8691 * Fetches a system table qword.
8692 *
8693 * @returns Strict VBox status code.
8694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8695 * @param pu64Dst Where to return the qword.
8696 * @param iSegReg The index of the segment register to use for
8697 * this access. The base and limits are checked.
8698 * @param GCPtrMem The address of the guest memory.
8699 */
8700VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8701{
8702 /* The lazy approach for now... */
8703 uint64_t const *pu64Src;
8704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8705 if (rc == VINF_SUCCESS)
8706 {
8707 *pu64Dst = *pu64Src;
8708 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8709 }
8710 return rc;
8711}
8712
8713
8714/**
8715 * Fetches a descriptor table entry with caller specified error code.
8716 *
8717 * @returns Strict VBox status code.
8718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8719 * @param pDesc Where to return the descriptor table entry.
8720 * @param uSel The selector which table entry to fetch.
8721 * @param uXcpt The exception to raise on table lookup error.
8722 * @param uErrorCode The error code associated with the exception.
8723 */
8724static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8725 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8726{
8727 AssertPtr(pDesc);
8728 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8729
8730 /** @todo did the 286 require all 8 bytes to be accessible? */
8731 /*
8732 * Get the selector table base and check bounds.
8733 */
8734 RTGCPTR GCPtrBase;
8735 if (uSel & X86_SEL_LDT)
8736 {
8737 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8738 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8739 {
8740 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8741 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8742 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8743 uErrorCode, 0);
8744 }
8745
8746 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8747 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8748 }
8749 else
8750 {
8751 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8752 {
8753 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8754 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8755 uErrorCode, 0);
8756 }
8757 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8758 }
8759
8760 /*
8761 * Read the legacy descriptor and maybe the long mode extensions if
8762 * required.
8763 */
8764 VBOXSTRICTRC rcStrict;
8765 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8766 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8767 else
8768 {
8769 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8770 if (rcStrict == VINF_SUCCESS)
8771 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8772 if (rcStrict == VINF_SUCCESS)
8773 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8774 if (rcStrict == VINF_SUCCESS)
8775 pDesc->Legacy.au16[3] = 0;
8776 else
8777 return rcStrict;
8778 }
8779
8780 if (rcStrict == VINF_SUCCESS)
8781 {
8782 if ( !IEM_IS_LONG_MODE(pVCpu)
8783 || pDesc->Legacy.Gen.u1DescType)
8784 pDesc->Long.au64[1] = 0;
8785 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8786 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8787 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8788 else
8789 {
8790 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8791 /** @todo is this the right exception? */
8792 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8793 }
8794 }
8795 return rcStrict;
8796}
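
/*
 * Worked example for the bounds check and address computation above, using a
 * made-up GDT selector 0x002b (index 5, TI=0, RPL=3):
 *
 *      //   uSel | X86_SEL_RPL_LDT  = 0x002f   -> must not exceed gdtr.cbGdt
 *      //   uSel & X86_SEL_MASK     = 0x0028   -> entry at gdtr.pGdt + 0x28
 */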
8797
8798
8799/**
8800 * Fetches a descriptor table entry.
8801 *
8802 * @returns Strict VBox status code.
8803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8804 * @param pDesc Where to return the descriptor table entry.
8805 * @param uSel The selector which table entry to fetch.
8806 * @param uXcpt The exception to raise on table lookup error.
8807 */
8808VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8809{
8810 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8811}
8812
8813
8814/**
8815 * Marks the selector descriptor as accessed (only non-system descriptors).
8816 *
8817 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8818 * will therefore skip the limit checks.
8819 *
8820 * @returns Strict VBox status code.
8821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8822 * @param uSel The selector.
8823 */
8824VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8825{
8826 /*
8827 * Get the selector table base and calculate the entry address.
8828 */
8829 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8830 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8831 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8832 GCPtr += uSel & X86_SEL_MASK;
8833
8834 /*
8835 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8836 * ugly stuff to avoid this. This will make sure it's an atomic access
8837 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8838 */
8839 VBOXSTRICTRC rcStrict;
8840 uint32_t volatile *pu32;
8841 if ((GCPtr & 3) == 0)
8842 {
8843 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8844 GCPtr += 2 + 2;
8845 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8846 if (rcStrict != VINF_SUCCESS)
8847 return rcStrict;
8848 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8849 }
8850 else
8851 {
8852 /* The misaligned GDT/LDT case, map the whole thing. */
8853 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8854 if (rcStrict != VINF_SUCCESS)
8855 return rcStrict;
8856 switch ((uintptr_t)pu32 & 3)
8857 {
8858 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8859 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8860 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8861 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8862 }
8863 }
8864
8865 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8866}
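
/*
 * Bit position sanity check for the function above: the accessed bit is bit 40
 * of the 8-byte descriptor (bit 0 of the type byte at offset 5).  In the
 * aligned case the dword at offset 4 is mapped, so the bit to set within it is
 * 40 - 32 = 8.  In the misaligned case the whole 8 bytes are mapped and the
 * byte pointer / bit number pair is chosen so the very same descriptor bit is
 * set without making a misaligned 32-bit access.
 */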
8867
8868/** @} */
8869
8870/** @name Opcode Helpers.
8871 * @{
8872 */
8873
8874/**
8875 * Calculates the effective address of a ModR/M memory operand.
8876 *
8877 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8878 *
8879 * @return Strict VBox status code.
8880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8881 * @param bRm The ModRM byte.
8882 * @param cbImmAndRspOffset - First byte: The size of any immediate
8883 * following the effective address opcode bytes
8884 * (only for RIP relative addressing).
8885 * - Second byte: RSP displacement (for POP [ESP]).
8886 * @param pGCPtrEff Where to return the effective address.
8887 */
8888VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8889{
8890 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8891# define SET_SS_DEF() \
8892 do \
8893 { \
8894 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8895 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8896 } while (0)
8897
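 /*
 * Worked 16-bit decode example for the code below (values made up):
 * bRm = 0x42 has mod=01 and rm=010, so the effective address is
 * BP + SI + disp8, and because BP is involved SET_SS_DEF() switches the
 * default segment to SS unless a segment prefix overrides it.
 */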
8898 if (!IEM_IS_64BIT_CODE(pVCpu))
8899 {
8900/** @todo Check the effective address size crap! */
8901 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8902 {
8903 uint16_t u16EffAddr;
8904
8905 /* Handle the disp16 form with no registers first. */
8906 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8907 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8908 else
8909 {
8910 /* Get the displacement. */
8911 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8912 {
8913 case 0: u16EffAddr = 0; break;
8914 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8915 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8916 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8917 }
8918
8919 /* Add the base and index registers to the disp. */
8920 switch (bRm & X86_MODRM_RM_MASK)
8921 {
8922 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8923 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8924 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8925 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8926 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8927 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8928 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8929 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8930 }
8931 }
8932
8933 *pGCPtrEff = u16EffAddr;
8934 }
8935 else
8936 {
8937 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8938 uint32_t u32EffAddr;
8939
8940 /* Handle the disp32 form with no registers first. */
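/* (Likewise, in 32-bit addressing mod=00 rm=101 encodes a plain disp32 rather than
    [EBP].) */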
8941 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8942 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8943 else
8944 {
8945 /* Get the register (or SIB) value. */
8946 switch ((bRm & X86_MODRM_RM_MASK))
8947 {
8948 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8949 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8950 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8951 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8952 case 4: /* SIB */
8953 {
8954 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8955
8956 /* Get the index and scale it. */
8957 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8958 {
8959 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8960 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8961 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8962 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8963 case 4: u32EffAddr = 0; /*none */ break;
8964 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8965 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8966 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8967 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8968 }
8969 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8970
8971 /* add base */
8972 switch (bSib & X86_SIB_BASE_MASK)
8973 {
8974 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8975 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8976 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8977 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8978 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8979 case 5:
8980 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8981 {
8982 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8983 SET_SS_DEF();
8984 }
8985 else
8986 {
8987 uint32_t u32Disp;
8988 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8989 u32EffAddr += u32Disp;
8990 }
8991 break;
8992 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8993 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8994 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8995 }
8996 break;
8997 }
8998 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8999 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9000 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9002 }
9003
9004 /* Get and add the displacement. */
9005 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9006 {
9007 case 0:
9008 break;
9009 case 1:
9010 {
9011 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9012 u32EffAddr += i8Disp;
9013 break;
9014 }
9015 case 2:
9016 {
9017 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9018 u32EffAddr += u32Disp;
9019 break;
9020 }
9021 default:
9022 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9023 }
9024
9025 }
9026 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9027 *pGCPtrEff = u32EffAddr;
9028 else
9029 {
9030 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9031 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9032 }
9033 }
9034 }
9035 else
9036 {
9037 uint64_t u64EffAddr;
9038
9039 /* Handle the rip+disp32 form with no registers first. */
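/* (In 64-bit mode mod=00 rm=101 means RIP-relative addressing: the disp32 is relative
    to the next instruction. IEM_GET_INSTR_LEN only covers the bytes decoded so far, so
    the size of any trailing immediate, supplied in the low byte of cbImmAndRspOffset,
    has to be added as well to arrive at the next RIP.) */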
9040 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9041 {
9042 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9043 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9044 }
9045 else
9046 {
9047 /* Get the register (or SIB) value. */
9048 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9049 {
9050 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9051 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9052 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9053 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9054 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9055 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9056 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9057 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9058 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9059 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9060 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9061 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9062 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9063 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9064 /* SIB */
9065 case 4:
9066 case 12:
9067 {
9068 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9069
9070 /* Get the index and scale it. */
9071 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9072 {
9073 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9074 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9075 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9076 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9077 case 4: u64EffAddr = 0; /*none */ break;
9078 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9079 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9080 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9081 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9082 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9083 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9084 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9085 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9086 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9087 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9088 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9089 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9090 }
9091 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9092
9093 /* add base */
9094 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9095 {
9096 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9097 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9098 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9099 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9100 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9101 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9102 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9103 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9104 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9105 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9106 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9107 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9108 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9109 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9110 /* complicated encodings */
9111 case 5:
9112 case 13:
9113 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9114 {
9115 if (!pVCpu->iem.s.uRexB)
9116 {
9117 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9118 SET_SS_DEF();
9119 }
9120 else
9121 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9122 }
9123 else
9124 {
9125 uint32_t u32Disp;
9126 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9127 u64EffAddr += (int32_t)u32Disp;
9128 }
9129 break;
9130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9131 }
9132 break;
9133 }
9134 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9135 }
9136
9137 /* Get and add the displacement. */
9138 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9139 {
9140 case 0:
9141 break;
9142 case 1:
9143 {
9144 int8_t i8Disp;
9145 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9146 u64EffAddr += i8Disp;
9147 break;
9148 }
9149 case 2:
9150 {
9151 uint32_t u32Disp;
9152 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9153 u64EffAddr += (int32_t)u32Disp;
9154 break;
9155 }
9156 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9157 }
9158
9159 }
9160
9161 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9162 *pGCPtrEff = u64EffAddr;
9163 else
9164 {
9165 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9166 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9167 }
9168 }
9169
9170 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9171 return VINF_SUCCESS;
9172}
9173
9174
9175#ifdef IEM_WITH_SETJMP
9176/**
9177 * Calculates the effective address of a ModR/M memory operand.
9178 *
9179 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9180 *
9181 * May longjmp on internal error.
9182 *
9183 * @return The effective address.
9184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9185 * @param bRm The ModRM byte.
9186 * @param cbImmAndRspOffset - First byte: The size of any immediate
9187 * following the effective address opcode bytes
9188 * (only for RIP relative addressing).
9189 * - Second byte: RSP displacement (for POP [ESP]).
9190 */
9191RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9192{
9193 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9194# define SET_SS_DEF() \
9195 do \
9196 { \
9197 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9198 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9199 } while (0)
9200
9201 if (!IEM_IS_64BIT_CODE(pVCpu))
9202 {
9203/** @todo Check the effective address size crap! */
9204 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9205 {
9206 uint16_t u16EffAddr;
9207
9208 /* Handle the disp16 form with no registers first. */
9209 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9210 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9211 else
9212 {
9213 /* Get the displacement. */
9214 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9215 {
9216 case 0: u16EffAddr = 0; break;
9217 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9218 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9219 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9220 }
9221
9222 /* Add the base and index registers to the disp. */
9223 switch (bRm & X86_MODRM_RM_MASK)
9224 {
9225 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9226 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9227 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9228 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9229 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9230 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9231 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9232 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9233 }
9234 }
9235
9236 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9237 return u16EffAddr;
9238 }
9239
9240 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9241 uint32_t u32EffAddr;
9242
9243 /* Handle the disp32 form with no registers first. */
9244 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9245 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9246 else
9247 {
9248 /* Get the register (or SIB) value. */
9249 switch ((bRm & X86_MODRM_RM_MASK))
9250 {
9251 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9252 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9253 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9254 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9255 case 4: /* SIB */
9256 {
9257 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9258
9259 /* Get the index and scale it. */
9260 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9261 {
9262 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9263 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9264 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9265 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9266 case 4: u32EffAddr = 0; /*none */ break;
9267 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9268 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9269 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9270 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9271 }
9272 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9273
9274 /* add base */
9275 switch (bSib & X86_SIB_BASE_MASK)
9276 {
9277 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9278 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9279 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9280 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9281 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9282 case 5:
9283 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9284 {
9285 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9286 SET_SS_DEF();
9287 }
9288 else
9289 {
9290 uint32_t u32Disp;
9291 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9292 u32EffAddr += u32Disp;
9293 }
9294 break;
9295 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9296 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9297 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9298 }
9299 break;
9300 }
9301 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9302 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9303 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9304 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9305 }
9306
9307 /* Get and add the displacement. */
9308 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9309 {
9310 case 0:
9311 break;
9312 case 1:
9313 {
9314 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9315 u32EffAddr += i8Disp;
9316 break;
9317 }
9318 case 2:
9319 {
9320 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9321 u32EffAddr += u32Disp;
9322 break;
9323 }
9324 default:
9325 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9326 }
9327 }
9328
9329 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9330 {
9331 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9332 return u32EffAddr;
9333 }
9334 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9335 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9336 return u32EffAddr & UINT16_MAX;
9337 }
9338
9339 uint64_t u64EffAddr;
9340
9341 /* Handle the rip+disp32 form with no registers first. */
9342 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9343 {
9344 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9345 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9346 }
9347 else
9348 {
9349 /* Get the register (or SIB) value. */
9350 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9351 {
9352 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9353 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9354 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9355 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9356 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9357 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9358 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9359 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9360 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9361 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9362 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9363 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9364 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9365 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9366 /* SIB */
9367 case 4:
9368 case 12:
9369 {
9370 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9371
9372 /* Get the index and scale it. */
9373 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9374 {
9375 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9376 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9377 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9378 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9379 case 4: u64EffAddr = 0; /*none */ break;
9380 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9381 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9382 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9383 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9384 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9385 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9386 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9387 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9388 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9389 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9390 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9391 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9392 }
9393 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9394
9395 /* add base */
9396 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9397 {
9398 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9399 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9400 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9401 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9402 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9403 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9404 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9405 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9406 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9407 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9408 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9409 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9410 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9411 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9412 /* complicated encodings */
9413 case 5:
9414 case 13:
9415 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9416 {
9417 if (!pVCpu->iem.s.uRexB)
9418 {
9419 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9420 SET_SS_DEF();
9421 }
9422 else
9423 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9424 }
9425 else
9426 {
9427 uint32_t u32Disp;
9428 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9429 u64EffAddr += (int32_t)u32Disp;
9430 }
9431 break;
9432 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9433 }
9434 break;
9435 }
9436 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9437 }
9438
9439 /* Get and add the displacement. */
9440 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9441 {
9442 case 0:
9443 break;
9444 case 1:
9445 {
9446 int8_t i8Disp;
9447 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9448 u64EffAddr += i8Disp;
9449 break;
9450 }
9451 case 2:
9452 {
9453 uint32_t u32Disp;
9454 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9455 u64EffAddr += (int32_t)u32Disp;
9456 break;
9457 }
9458 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9459 }
9460
9461 }
9462
9463 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9464 {
9465 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9466 return u64EffAddr;
9467 }
9468 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9469 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9470 return u64EffAddr & UINT32_MAX;
9471}
9472#endif /* IEM_WITH_SETJMP */
9473
9474
9475/**
9476 * Calculates the effective address of a ModR/M memory operand, extended version
9477 * for use in the recompilers.
9478 *
9479 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9480 *
9481 * @return Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param bRm The ModRM byte.
9484 * @param cbImmAndRspOffset - First byte: The size of any immediate
9485 * following the effective address opcode bytes
9486 * (only for RIP relative addressing).
9487 * - Second byte: RSP displacement (for POP [ESP]).
9488 * @param pGCPtrEff Where to return the effective address.
9489 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9490 * SIB byte (bits 39:32).
9491 */
9492VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9493{
9494 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9495# define SET_SS_DEF() \
9496 do \
9497 { \
9498 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9499 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9500 } while (0)
9501
9502 uint64_t uInfo;
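    /* uInfo packs the raw displacement into bits 31:0 and the SIB byte, when present,
       into bits 39:32. E.g. a 32-bit "mov eax, [ebx+ecx*4+0x12345678]" (mod=10 rm=100,
       SIB=0x8B) would yield uInfo = 0x0000008B12345678. */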
9503 if (!IEM_IS_64BIT_CODE(pVCpu))
9504 {
9505/** @todo Check the effective address size crap! */
9506 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9507 {
9508 uint16_t u16EffAddr;
9509
9510 /* Handle the disp16 form with no registers first. */
9511 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9512 {
9513 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9514 uInfo = u16EffAddr;
9515 }
9516 else
9517 {
9518 /* Get the displacement. */
9519 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9520 {
9521 case 0: u16EffAddr = 0; break;
9522 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9523 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9524 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9525 }
9526 uInfo = u16EffAddr;
9527
9528 /* Add the base and index registers to the disp. */
9529 switch (bRm & X86_MODRM_RM_MASK)
9530 {
9531 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9532 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9533 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9534 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9535 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9536 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9537 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9538 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9539 }
9540 }
9541
9542 *pGCPtrEff = u16EffAddr;
9543 }
9544 else
9545 {
9546 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9547 uint32_t u32EffAddr;
9548
9549 /* Handle the disp32 form with no registers first. */
9550 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9551 {
9552 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9553 uInfo = u32EffAddr;
9554 }
9555 else
9556 {
9557 /* Get the register (or SIB) value. */
9558 uInfo = 0;
9559 switch ((bRm & X86_MODRM_RM_MASK))
9560 {
9561 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9562 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9563 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9564 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9565 case 4: /* SIB */
9566 {
9567 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9568 uInfo = (uint64_t)bSib << 32;
9569
9570 /* Get the index and scale it. */
9571 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9572 {
9573 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9574 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9575 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9576 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9577 case 4: u32EffAddr = 0; /*none */ break;
9578 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9579 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9580 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9581 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9582 }
9583 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9584
9585 /* add base */
9586 switch (bSib & X86_SIB_BASE_MASK)
9587 {
9588 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9589 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9590 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9591 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9592 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9593 case 5:
9594 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9595 {
9596 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9597 SET_SS_DEF();
9598 }
9599 else
9600 {
9601 uint32_t u32Disp;
9602 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9603 u32EffAddr += u32Disp;
9604 uInfo |= u32Disp;
9605 }
9606 break;
9607 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9608 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9609 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9610 }
9611 break;
9612 }
9613 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9614 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9615 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9616 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9617 }
9618
9619 /* Get and add the displacement. */
9620 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9621 {
9622 case 0:
9623 break;
9624 case 1:
9625 {
9626 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9627 u32EffAddr += i8Disp;
9628 uInfo |= (uint32_t)(int32_t)i8Disp;
9629 break;
9630 }
9631 case 2:
9632 {
9633 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9634 u32EffAddr += u32Disp;
9635 uInfo |= (uint32_t)u32Disp;
9636 break;
9637 }
9638 default:
9639 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9640 }
9641
9642 }
9643 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9644 *pGCPtrEff = u32EffAddr;
9645 else
9646 {
9647 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9648 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9649 }
9650 }
9651 }
9652 else
9653 {
9654 uint64_t u64EffAddr;
9655
9656 /* Handle the rip+disp32 form with no registers first. */
9657 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9658 {
9659 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9660 uInfo = (uint32_t)u64EffAddr;
9661 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9662 }
9663 else
9664 {
9665 /* Get the register (or SIB) value. */
9666 uInfo = 0;
9667 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9668 {
9669 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9670 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9671 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9672 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9673 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9674 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9675 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9676 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9677 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9678 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9679 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9680 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9681 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9682 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9683 /* SIB */
9684 case 4:
9685 case 12:
9686 {
9687 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9688 uInfo = (uint64_t)bSib << 32;
9689
9690 /* Get the index and scale it. */
9691 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9692 {
9693 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9694 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9695 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9696 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9697 case 4: u64EffAddr = 0; /*none */ break;
9698 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9699 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9700 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9701 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9702 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9703 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9704 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9705 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9706 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9707 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9708 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9710 }
9711 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9712
9713 /* add base */
9714 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9715 {
9716 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9717 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9718 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9719 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9720 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9721 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9722 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9723 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9724 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9725 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9726 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9727 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9728 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9729 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9730 /* complicated encodings */
9731 case 5:
9732 case 13:
9733 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9734 {
9735 if (!pVCpu->iem.s.uRexB)
9736 {
9737 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9738 SET_SS_DEF();
9739 }
9740 else
9741 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9742 }
9743 else
9744 {
9745 uint32_t u32Disp;
9746 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9747 u64EffAddr += (int32_t)u32Disp;
9748 uInfo |= u32Disp;
9749 }
9750 break;
9751 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9752 }
9753 break;
9754 }
9755 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9756 }
9757
9758 /* Get and add the displacement. */
9759 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9760 {
9761 case 0:
9762 break;
9763 case 1:
9764 {
9765 int8_t i8Disp;
9766 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9767 u64EffAddr += i8Disp;
9768 uInfo |= (uint32_t)(int32_t)i8Disp;
9769 break;
9770 }
9771 case 2:
9772 {
9773 uint32_t u32Disp;
9774 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9775 u64EffAddr += (int32_t)u32Disp;
9776 uInfo |= u32Disp;
9777 break;
9778 }
9779 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9780 }
9781
9782 }
9783
9784 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9785 *pGCPtrEff = u64EffAddr;
9786 else
9787 {
9788 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9789 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9790 }
9791 }
9792 *puInfo = uInfo;
9793
9794 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9795 return VINF_SUCCESS;
9796}
9797
9798/** @} */
9799
9800
9801#ifdef LOG_ENABLED
9802/**
9803 * Logs the current instruction.
9804 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9805 * @param fSameCtx Set if we have the same context information as the VMM,
9806 * clear if we may have already executed an instruction in
9807 * our debug context. When clear, we assume IEMCPU holds
9808 * valid CPU mode info.
9809 *
9810 * The @a fSameCtx parameter is now misleading and obsolete.
9811 * @param pszFunction The IEM function doing the execution.
9812 */
9813static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9814{
9815# ifdef IN_RING3
9816 if (LogIs2Enabled())
9817 {
9818 char szInstr[256];
9819 uint32_t cbInstr = 0;
9820 if (fSameCtx)
9821 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9822 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9823 szInstr, sizeof(szInstr), &cbInstr);
9824 else
9825 {
9826 uint32_t fFlags = 0;
9827 switch (IEM_GET_CPU_MODE(pVCpu))
9828 {
9829 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9830 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9831 case IEMMODE_16BIT:
9832 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9833 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9834 else
9835 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9836 break;
9837 }
9838 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9839 szInstr, sizeof(szInstr), &cbInstr);
9840 }
9841
9842 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9843 Log2(("**** %s fExec=%x\n"
9844 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9845 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9846 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9847 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9848 " %s\n"
9849 , pszFunction, pVCpu->iem.s.fExec,
9850 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9851 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9852 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9853 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9854 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9855 szInstr));
9856
9857 if (LogIs3Enabled())
9858 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9859 }
9860 else
9861# endif
9862 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9863 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9864 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9865}
9866#endif /* LOG_ENABLED */
9867
9868
9869#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9870/**
9871 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9872 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9873 *
9874 * @returns Modified rcStrict.
9875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9876 * @param rcStrict The instruction execution status.
9877 */
9878static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9879{
9880 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9881 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9882 {
9883 /* VMX preemption timer takes priority over NMI-window exits. */
9884 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9885 {
9886 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9887 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9888 }
9889 /*
9890 * Check remaining intercepts.
9891 *
9892 * NMI-window and Interrupt-window VM-exits.
9893 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9894 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9895 *
9896 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9897 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9898 */
9899 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9900 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9901 && !TRPMHasTrap(pVCpu))
9902 {
9903 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9904 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9905 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9906 {
9907 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9908 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9909 }
9910 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9911 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9912 {
9913 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9914 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9915 }
9916 }
9917 }
9918 /* TPR-below threshold/APIC write has the highest priority. */
9919 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9920 {
9921 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9922 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9923 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9924 }
9925 /* MTF takes priority over VMX-preemption timer. */
9926 else
9927 {
9928 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9929 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9930 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9931 }
9932 return rcStrict;
9933}
9934#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9935
9936
9937/**
9938 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9939 * IEMExecOneWithPrefetchedByPC.
9940 *
9941 * Similar code is found in IEMExecLots.
9942 *
9943 * @return Strict VBox status code.
9944 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9945 * @param fExecuteInhibit If set, execute the instruction following STI,
9946 * POP SS and MOV SS,GR.
9947 * @param pszFunction The calling function name.
9948 */
9949DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9950{
9951 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9952 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9953 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9954 RT_NOREF_PV(pszFunction);
9955
9956#ifdef IEM_WITH_SETJMP
9957 VBOXSTRICTRC rcStrict;
9958 IEM_TRY_SETJMP(pVCpu, rcStrict)
9959 {
9960 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9961 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9962 }
9963 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9964 {
9965 pVCpu->iem.s.cLongJumps++;
9966 }
9967 IEM_CATCH_LONGJMP_END(pVCpu);
9968#else
9969 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9970 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9971#endif
9972 if (rcStrict == VINF_SUCCESS)
9973 pVCpu->iem.s.cInstructions++;
9974 if (pVCpu->iem.s.cActiveMappings > 0)
9975 {
9976 Assert(rcStrict != VINF_SUCCESS);
9977 iemMemRollback(pVCpu);
9978 }
9979 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9980 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9981 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9982
9983//#ifdef DEBUG
9984// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9985//#endif
9986
9987#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9988 /*
9989 * Perform any VMX nested-guest instruction boundary actions.
9990 *
9991 * If any of these causes a VM-exit, we must skip executing the next
9992 * instruction (would run into stale page tables). A VM-exit makes sure
9993 * there is no interrupt-inhibition, so that should ensure we don't go on
9994 * to try executing the next instruction. Clearing fExecuteInhibit is
9995 * problematic because of the setjmp/longjmp clobbering above.
9996 */
9997 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9998 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9999 || rcStrict != VINF_SUCCESS)
10000 { /* likely */ }
10001 else
10002 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10003#endif
10004
10005 /* Execute the next instruction as well if a sti, pop ss or
10006 mov ss, Gr has just completed successfully. */
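    /* (The CPUM "interrupt shadow" flag is what gates this: it is set for instructions
        that inhibit interrupt delivery until the next instruction has completed, and
        executing that next instruction here means the inhibit window can be handled
        within IEM itself.) */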
10007 if ( fExecuteInhibit
10008 && rcStrict == VINF_SUCCESS
10009 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10010 {
10011 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10012 if (rcStrict == VINF_SUCCESS)
10013 {
10014#ifdef LOG_ENABLED
10015 iemLogCurInstr(pVCpu, false, pszFunction);
10016#endif
10017#ifdef IEM_WITH_SETJMP
10018 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10019 {
10020 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10021 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10022 }
10023 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10024 {
10025 pVCpu->iem.s.cLongJumps++;
10026 }
10027 IEM_CATCH_LONGJMP_END(pVCpu);
10028#else
10029 IEM_OPCODE_GET_FIRST_U8(&b);
10030 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10031#endif
10032 if (rcStrict == VINF_SUCCESS)
10033 {
10034 pVCpu->iem.s.cInstructions++;
10035#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10036 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10037 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10038 { /* likely */ }
10039 else
10040 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10041#endif
10042 }
10043 if (pVCpu->iem.s.cActiveMappings > 0)
10044 {
10045 Assert(rcStrict != VINF_SUCCESS);
10046 iemMemRollback(pVCpu);
10047 }
10048 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10049 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10050 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10051 }
10052 else if (pVCpu->iem.s.cActiveMappings > 0)
10053 iemMemRollback(pVCpu);
10054 /** @todo drop this after we bake this change into RIP advancing. */
10055 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10056 }
10057
10058 /*
10059 * Return value fiddling, statistics and sanity assertions.
10060 */
10061 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10062
10063 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10065 return rcStrict;
10066}
10067
10068
10069/**
10070 * Execute one instruction.
10071 *
10072 * @return Strict VBox status code.
10073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10074 */
10075VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10076{
10077 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10078#ifdef LOG_ENABLED
10079 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10080#endif
10081
10082 /*
10083 * Do the decoding and emulation.
10084 */
10085 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10086 if (rcStrict == VINF_SUCCESS)
10087 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10088 else if (pVCpu->iem.s.cActiveMappings > 0)
10089 iemMemRollback(pVCpu);
10090
10091 if (rcStrict != VINF_SUCCESS)
10092 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10093 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10094 return rcStrict;
10095}
10096
10097
10098VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10099{
10100 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10101 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10102 if (rcStrict == VINF_SUCCESS)
10103 {
10104 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10105 if (pcbWritten)
10106 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10107 }
10108 else if (pVCpu->iem.s.cActiveMappings > 0)
10109 iemMemRollback(pVCpu);
10110
10111 return rcStrict;
10112}
10113
10114
10115VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10116 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10117{
10118 VBOXSTRICTRC rcStrict;
10119 if ( cbOpcodeBytes
10120 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10121 {
10122 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10123#ifdef IEM_WITH_CODE_TLB
10124 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10125 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10126 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10127 pVCpu->iem.s.offCurInstrStart = 0;
10128 pVCpu->iem.s.offInstrNextByte = 0;
10129 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10130#else
10131 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10132 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10133#endif
10134 rcStrict = VINF_SUCCESS;
10135 }
10136 else
10137 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10138 if (rcStrict == VINF_SUCCESS)
10139 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10140 else if (pVCpu->iem.s.cActiveMappings > 0)
10141 iemMemRollback(pVCpu);
10142
10143 return rcStrict;
10144}
10145
10146
10147VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10148{
10149 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10150 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10151 if (rcStrict == VINF_SUCCESS)
10152 {
10153 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10154 if (pcbWritten)
10155 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10156 }
10157 else if (pVCpu->iem.s.cActiveMappings > 0)
10158 iemMemRollback(pVCpu);
10159
10160 return rcStrict;
10161}
10162
10163
10164VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10165 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10166{
10167 VBOXSTRICTRC rcStrict;
10168 if ( cbOpcodeBytes
10169 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10170 {
10171 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10172#ifdef IEM_WITH_CODE_TLB
10173 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10174 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10175 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10176 pVCpu->iem.s.offCurInstrStart = 0;
10177 pVCpu->iem.s.offInstrNextByte = 0;
10178 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10179#else
10180 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10181 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10182#endif
10183 rcStrict = VINF_SUCCESS;
10184 }
10185 else
10186 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10187 if (rcStrict == VINF_SUCCESS)
10188 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10189 else if (pVCpu->iem.s.cActiveMappings > 0)
10190 iemMemRollback(pVCpu);
10191
10192 return rcStrict;
10193}
10194
10195
10196/**
10197 * For handling split cacheline lock operations when the host has split-lock
10198 * detection enabled.
10199 *
10200 * This will cause the interpreter to disregard the lock prefix and implicit
10201 * locking (xchg).
10202 *
10203 * @returns Strict VBox status code.
10204 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10205 */
10206VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10207{
10208 /*
10209 * Do the decoding and emulation.
10210 */
10211 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10212 if (rcStrict == VINF_SUCCESS)
10213 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10214 else if (pVCpu->iem.s.cActiveMappings > 0)
10215 iemMemRollback(pVCpu);
10216
10217 if (rcStrict != VINF_SUCCESS)
10218 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10219 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10220 return rcStrict;
10221}
10222
10223
10224/**
10225 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10226 * inject a pending TRPM trap.
10227 */
10228VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10229{
10230 Assert(TRPMHasTrap(pVCpu));
10231
10232 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10233 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10234 {
10235 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10236#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10237 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10238 if (fIntrEnabled)
10239 {
10240 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10241 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10242 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10243 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10244 else
10245 {
10246 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10247 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10248 }
10249 }
10250#else
10251 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10252#endif
10253 if (fIntrEnabled)
10254 {
10255 uint8_t u8TrapNo;
10256 TRPMEVENT enmType;
10257 uint32_t uErrCode;
10258 RTGCPTR uCr2;
10259 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10260 AssertRC(rc2);
10261 Assert(enmType == TRPM_HARDWARE_INT);
10262 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10263
10264 TRPMResetTrap(pVCpu);
10265
10266#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10267 /* Injecting an event may cause a VM-exit. */
10268 if ( rcStrict != VINF_SUCCESS
10269 && rcStrict != VINF_IEM_RAISED_XCPT)
10270 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10271#else
10272 NOREF(rcStrict);
10273#endif
10274 }
10275 }
10276
10277 return VINF_SUCCESS;
10278}
10279
10280
10281VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10282{
10283 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10284 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10285 Assert(cMaxInstructions > 0);
10286
10287 /*
10288 * See if there is an interrupt pending in TRPM, inject it if we can.
10289 */
10290 /** @todo What if we are injecting an exception and not an interrupt? Is that
10291 * possible here? For now we assert it is indeed only an interrupt. */
10292 if (!TRPMHasTrap(pVCpu))
10293 { /* likely */ }
10294 else
10295 {
10296 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10297 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10298 { /*likely */ }
10299 else
10300 return rcStrict;
10301 }
10302
10303 /*
10304 * Initial decoder init w/ prefetch, then setup setjmp.
10305 */
10306 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10307 if (rcStrict == VINF_SUCCESS)
10308 {
10309#ifdef IEM_WITH_SETJMP
10310 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10311 IEM_TRY_SETJMP(pVCpu, rcStrict)
10312#endif
10313 {
10314 /*
10315 * The run loop. We limit ourselves to the caller specified maximum number of instructions.
10316 */
10317 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10318 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10319 for (;;)
10320 {
10321 /*
10322 * Log the state.
10323 */
10324#ifdef LOG_ENABLED
10325 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10326#endif
10327
10328 /*
10329 * Do the decoding and emulation.
10330 */
10331 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10332 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10333#ifdef VBOX_STRICT
10334 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10335#endif
10336 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10337 {
10338 Assert(pVCpu->iem.s.cActiveMappings == 0);
10339 pVCpu->iem.s.cInstructions++;
10340
10341#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10342 /* Perform any VMX nested-guest instruction boundary actions. */
10343 uint64_t fCpu = pVCpu->fLocalForcedActions;
10344 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10345 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10346 { /* likely */ }
10347 else
10348 {
10349 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10351 fCpu = pVCpu->fLocalForcedActions;
10352 else
10353 {
10354 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10355 break;
10356 }
10357 }
10358#endif
10359 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10360 {
10361#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10362 uint64_t fCpu = pVCpu->fLocalForcedActions;
10363#endif
10364 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10365 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10366 | VMCPU_FF_TLB_FLUSH
10367 | VMCPU_FF_UNHALT );
10368
10369 if (RT_LIKELY( ( !fCpu
10370 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10371 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10372 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10373 {
10374 if (--cMaxInstructionsGccStupidity > 0)
10375 {
10376 /* Poll timers every now and then according to the caller's specs. */
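    /* (cPollRate + 1 is asserted to be a power of two on entry, so cPollRate acts as
        a mask: TMTimerPollBool is only consulted once every cPollRate + 1 instructions,
        i.e. whenever the down-counter hits a multiple of that value.) */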
10377 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10378 || !TMTimerPollBool(pVM, pVCpu))
10379 {
10380 Assert(pVCpu->iem.s.cActiveMappings == 0);
10381 iemReInitDecoder(pVCpu);
10382 continue;
10383 }
10384 }
10385 }
10386 }
10387 Assert(pVCpu->iem.s.cActiveMappings == 0);
10388 }
10389 else if (pVCpu->iem.s.cActiveMappings > 0)
10390 iemMemRollback(pVCpu);
10391 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10392 break;
10393 }
10394 }
10395#ifdef IEM_WITH_SETJMP
10396 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10397 {
10398 if (pVCpu->iem.s.cActiveMappings > 0)
10399 iemMemRollback(pVCpu);
10400# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10401 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10402# endif
10403 pVCpu->iem.s.cLongJumps++;
10404 }
10405 IEM_CATCH_LONGJMP_END(pVCpu);
10406#endif
10407
10408 /*
10409 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10410 */
10411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10412 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10413 }
10414 else
10415 {
10416 if (pVCpu->iem.s.cActiveMappings > 0)
10417 iemMemRollback(pVCpu);
10418
10419#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10420 /*
10421 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10422 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10423 */
10424 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10425#endif
10426 }
10427
10428 /*
10429 * Maybe re-enter raw-mode and log.
10430 */
10431 if (rcStrict != VINF_SUCCESS)
10432 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10433 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10434 if (pcInstructions)
10435 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10436 return rcStrict;
10437}
10438
10439
10440/**
10441 * Interface used by EMExecuteExec, does exit statistics and limits.
10442 *
10443 * @returns Strict VBox status code.
10444 * @param pVCpu The cross context virtual CPU structure.
10445 * @param fWillExit To be defined.
10446 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10447 * @param cMaxInstructions Maximum number of instructions to execute.
10448 * @param cMaxInstructionsWithoutExits
10449 * The max number of instructions without exits.
10450 * @param pStats Where to return statistics.
10451 */
10452VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10453 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10454{
10455 NOREF(fWillExit); /** @todo define flexible exit crits */
10456
10457 /*
10458 * Initialize return stats.
10459 */
10460 pStats->cInstructions = 0;
10461 pStats->cExits = 0;
10462 pStats->cMaxExitDistance = 0;
10463 pStats->cReserved = 0;
10464
10465 /*
10466 * Initial decoder init w/ prefetch, then setup setjmp.
10467 */
10468 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10469 if (rcStrict == VINF_SUCCESS)
10470 {
10471#ifdef IEM_WITH_SETJMP
10472 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10473 IEM_TRY_SETJMP(pVCpu, rcStrict)
10474#endif
10475 {
10476#ifdef IN_RING0
10477 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10478#endif
10479 uint32_t cInstructionSinceLastExit = 0;
10480
10481 /*
10482 * The run loop. The instruction limits are supplied by the caller.
10483 */
10484 PVM pVM = pVCpu->CTX_SUFF(pVM);
10485 for (;;)
10486 {
10487 /*
10488 * Log the state.
10489 */
10490#ifdef LOG_ENABLED
10491 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10492#endif
10493
10494 /*
10495 * Do the decoding and emulation.
10496 */
10497 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10498
10499 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10500 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10501
10502 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10503 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10504 {
10505 pStats->cExits += 1;
10506 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10507 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10508 cInstructionSinceLastExit = 0;
10509 }
10510
10511 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10512 {
10513 Assert(pVCpu->iem.s.cActiveMappings == 0);
10514 pVCpu->iem.s.cInstructions++;
10515 pStats->cInstructions++;
10516 cInstructionSinceLastExit++;
10517
10518#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10519 /* Perform any VMX nested-guest instruction boundary actions. */
10520 uint64_t fCpu = pVCpu->fLocalForcedActions;
10521 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10522 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10523 { /* likely */ }
10524 else
10525 {
10526 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10527 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10528 fCpu = pVCpu->fLocalForcedActions;
10529 else
10530 {
10531 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10532 break;
10533 }
10534 }
10535#endif
10536 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10537 {
10538#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10539 uint64_t fCpu = pVCpu->fLocalForcedActions;
10540#endif
10541 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10542 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10543 | VMCPU_FF_TLB_FLUSH
10544 | VMCPU_FF_UNHALT );
10545 if (RT_LIKELY( ( ( !fCpu
10546 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10547 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10548 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10549 || pStats->cInstructions < cMinInstructions))
10550 {
10551 if (pStats->cInstructions < cMaxInstructions)
10552 {
10553 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10554 {
10555#ifdef IN_RING0
10556 if ( !fCheckPreemptionPending
10557 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10558#endif
10559 {
10560 Assert(pVCpu->iem.s.cActiveMappings == 0);
10561 iemReInitDecoder(pVCpu);
10562 continue;
10563 }
10564#ifdef IN_RING0
10565 rcStrict = VINF_EM_RAW_INTERRUPT;
10566 break;
10567#endif
10568 }
10569 }
10570 }
10571 Assert(!(fCpu & VMCPU_FF_IEM));
10572 }
10573 Assert(pVCpu->iem.s.cActiveMappings == 0);
10574 }
10575 else if (pVCpu->iem.s.cActiveMappings > 0)
10576 iemMemRollback(pVCpu);
10577 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10578 break;
10579 }
10580 }
10581#ifdef IEM_WITH_SETJMP
10582 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10583 {
10584 if (pVCpu->iem.s.cActiveMappings > 0)
10585 iemMemRollback(pVCpu);
10586 pVCpu->iem.s.cLongJumps++;
10587 }
10588 IEM_CATCH_LONGJMP_END(pVCpu);
10589#endif
10590
10591 /*
10592 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10593 */
10594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10595 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10596 }
10597 else
10598 {
10599 if (pVCpu->iem.s.cActiveMappings > 0)
10600 iemMemRollback(pVCpu);
10601
10602#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10603 /*
10604 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10605 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10606 */
10607 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10608#endif
10609 }
10610
10611 /*
10612 * Maybe re-enter raw-mode and log.
10613 */
10614 if (rcStrict != VINF_SUCCESS)
10615 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10616 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10617 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10618 return rcStrict;
10619}
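/*
 * A minimal usage sketch for the interface above. The limit values and the
 * zero fWillExit argument (still to be defined) are illustrative only; a real
 * caller derives them from its exit history and scheduling policy.
 *
 *      IEMEXECFOREXITSTATS ExitStats;
 *      VBOXSTRICTRC rcExec = IEMExecForExits(pVCpu, 0, 32, 4096, 512, &ExitStats);
 *      if (rcExec == VINF_SUCCESS)
 *          LogFlow(("ins=%u exits=%u maxdist=%u\n",
 *                   ExitStats.cInstructions, ExitStats.cExits, ExitStats.cMaxExitDistance));
 */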
10620
10621
10622/**
10623 * Injects a trap, fault, abort, software interrupt or external interrupt.
10624 *
10625 * The parameter list matches TRPMQueryTrapAll pretty closely.
10626 *
10627 * @returns Strict VBox status code.
10628 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10629 * @param u8TrapNo The trap number.
10630 * @param enmType What type is it (trap/fault/abort), software
10631 * interrupt or hardware interrupt.
10632 * @param uErrCode The error code if applicable.
10633 * @param uCr2 The CR2 value if applicable.
10634 * @param cbInstr The instruction length (only relevant for
10635 * software interrupts).
10636 */
10637VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10638 uint8_t cbInstr)
10639{
10640 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10641#ifdef DBGFTRACE_ENABLED
10642 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10643 u8TrapNo, enmType, uErrCode, uCr2);
10644#endif
10645
10646 uint32_t fFlags;
10647 switch (enmType)
10648 {
10649 case TRPM_HARDWARE_INT:
10650 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10651 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10652 uErrCode = uCr2 = 0;
10653 break;
10654
10655 case TRPM_SOFTWARE_INT:
10656 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10657 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10658 uErrCode = uCr2 = 0;
10659 break;
10660
10661 case TRPM_TRAP:
10662 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10663 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10664 if (u8TrapNo == X86_XCPT_PF)
10665 fFlags |= IEM_XCPT_FLAGS_CR2;
10666 switch (u8TrapNo)
10667 {
10668 case X86_XCPT_DF:
10669 case X86_XCPT_TS:
10670 case X86_XCPT_NP:
10671 case X86_XCPT_SS:
10672 case X86_XCPT_PF:
10673 case X86_XCPT_AC:
10674 case X86_XCPT_GP:
10675 fFlags |= IEM_XCPT_FLAGS_ERR;
10676 break;
10677 }
10678 break;
10679
10680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10681 }
10682
10683 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10684
10685 if (pVCpu->iem.s.cActiveMappings > 0)
10686 iemMemRollback(pVCpu);
10687
10688 return rcStrict;
10689}
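/*
 * A minimal sketch of injecting a page fault through the interface above.
 * The error code bits and GCPtrFault are illustrative; TRPM_TRAP routes the
 * call down the IEM_XCPT_FLAGS_T_CPU_XCPT path and X86_XCPT_PF adds the error
 * code and CR2 flags, while cbInstr is zero because it only matters for
 * software interrupts.
 *
 *      VBOXSTRICTRC rcInject = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            X86_TRAP_PF_P | X86_TRAP_PF_RW,
 *                                            GCPtrFault, 0);
 */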
10690
10691
10692/**
10693 * Injects the active TRPM event.
10694 *
10695 * @returns Strict VBox status code.
10696 * @param pVCpu The cross context virtual CPU structure.
10697 */
10698VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10699{
10700#ifndef IEM_IMPLEMENTS_TASKSWITCH
10701 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10702#else
10703 uint8_t u8TrapNo;
10704 TRPMEVENT enmType;
10705 uint32_t uErrCode;
10706 RTGCUINTPTR uCr2;
10707 uint8_t cbInstr;
10708 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10709 if (RT_FAILURE(rc))
10710 return rc;
10711
10712 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10713 * ICEBP \#DB injection as a special case. */
10714 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10715#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10716 if (rcStrict == VINF_SVM_VMEXIT)
10717 rcStrict = VINF_SUCCESS;
10718#endif
10719#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10720 if (rcStrict == VINF_VMX_VMEXIT)
10721 rcStrict = VINF_SUCCESS;
10722#endif
10723 /** @todo Are there any other codes that imply the event was successfully
10724 * delivered to the guest? See @bugref{6607}. */
10725 if ( rcStrict == VINF_SUCCESS
10726 || rcStrict == VINF_IEM_RAISED_XCPT)
10727 TRPMResetTrap(pVCpu);
10728
10729 return rcStrict;
10730#endif
10731}
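/*
 * A minimal sketch of the expected call pattern (hypothetical caller; a real
 * one also acts on the returned strict status code):
 *
 *      if (TRPMHasTrap(pVCpu))
 *          rcStrict = IEMInjectTrpmEvent(pVCpu);
 */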
10732
10733
10734VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10735{
10736 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10737 return VERR_NOT_IMPLEMENTED;
10738}
10739
10740
10741VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10742{
10743 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10744 return VERR_NOT_IMPLEMENTED;
10745}
10746
10747
10748/**
10749 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10750 *
10751 * This API ASSUMES that the caller has already verified that the guest code is
10752 * allowed to access the I/O port. (The I/O port is in the DX register in the
10753 * guest state.)
10754 *
10755 * @returns Strict VBox status code.
10756 * @param pVCpu The cross context virtual CPU structure.
10757 * @param cbValue The size of the I/O port access (1, 2, or 4).
10758 * @param enmAddrMode The addressing mode.
10759 * @param fRepPrefix Indicates whether a repeat prefix is used
10760 * (doesn't matter which for this instruction).
10761 * @param cbInstr The instruction length in bytes.
10762 * @param iEffSeg The effective segment register.
10763 * @param fIoChecked Whether the access to the I/O port has been
10764 * checked or not. It's typically checked in the
10765 * HM scenario.
10766 */
10767VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10768 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10769{
10770 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10771 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10772
10773 /*
10774 * State init.
10775 */
10776 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10777
10778 /*
10779 * Switch orgy for getting to the right handler.
10780 */
10781 VBOXSTRICTRC rcStrict;
10782 if (fRepPrefix)
10783 {
10784 switch (enmAddrMode)
10785 {
10786 case IEMMODE_16BIT:
10787 switch (cbValue)
10788 {
10789 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10790 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10791 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10792 default:
10793 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10794 }
10795 break;
10796
10797 case IEMMODE_32BIT:
10798 switch (cbValue)
10799 {
10800 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10801 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10802 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10803 default:
10804 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10805 }
10806 break;
10807
10808 case IEMMODE_64BIT:
10809 switch (cbValue)
10810 {
10811 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10812 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10813 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10814 default:
10815 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10816 }
10817 break;
10818
10819 default:
10820 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10821 }
10822 }
10823 else
10824 {
10825 switch (enmAddrMode)
10826 {
10827 case IEMMODE_16BIT:
10828 switch (cbValue)
10829 {
10830 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10831 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10832 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10833 default:
10834 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10835 }
10836 break;
10837
10838 case IEMMODE_32BIT:
10839 switch (cbValue)
10840 {
10841 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10842 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10843 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10844 default:
10845 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10846 }
10847 break;
10848
10849 case IEMMODE_64BIT:
10850 switch (cbValue)
10851 {
10852 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10853 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10854 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10855 default:
10856 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10857 }
10858 break;
10859
10860 default:
10861 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10862 }
10863 }
10864
10865 if (pVCpu->iem.s.cActiveMappings)
10866 iemMemRollback(pVCpu);
10867
10868 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10869}
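/*
 * A minimal usage sketch, assuming a hypothetical HM exit handler that has
 * established a byte-sized 'rep outs' with 32-bit addressing, DS as the
 * effective segment, an already validated I/O port and cbInstr taken from the
 * exit information:
 *
 *      rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, cbInstr,
 *                                      X86_SREG_DS, true);
 */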
10870
10871
10872/**
10873 * Interface for HM and EM for executing string I/O IN (read) instructions.
10874 *
10875 * This API ASSUMES that the caller has already verified that the guest code is
10876 * allowed to access the I/O port. (The I/O port is in the DX register in the
10877 * guest state.)
10878 *
10879 * @returns Strict VBox status code.
10880 * @param pVCpu The cross context virtual CPU structure.
10881 * @param cbValue The size of the I/O port access (1, 2, or 4).
10882 * @param enmAddrMode The addressing mode.
10883 * @param fRepPrefix Indicates whether a repeat prefix is used
10884 * (doesn't matter which for this instruction).
10885 * @param cbInstr The instruction length in bytes.
10886 * @param fIoChecked Whether the access to the I/O port has been
10887 * checked or not. It's typically checked in the
10888 * HM scenario.
10889 */
10890VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10891 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10892{
10893 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10894
10895 /*
10896 * State init.
10897 */
10898 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10899
10900 /*
10901 * Switch orgy for getting to the right handler.
10902 */
10903 VBOXSTRICTRC rcStrict;
10904 if (fRepPrefix)
10905 {
10906 switch (enmAddrMode)
10907 {
10908 case IEMMODE_16BIT:
10909 switch (cbValue)
10910 {
10911 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10912 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10913 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10914 default:
10915 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10916 }
10917 break;
10918
10919 case IEMMODE_32BIT:
10920 switch (cbValue)
10921 {
10922 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10923 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10924 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10925 default:
10926 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10927 }
10928 break;
10929
10930 case IEMMODE_64BIT:
10931 switch (cbValue)
10932 {
10933 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10934 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10935 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10936 default:
10937 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10938 }
10939 break;
10940
10941 default:
10942 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10943 }
10944 }
10945 else
10946 {
10947 switch (enmAddrMode)
10948 {
10949 case IEMMODE_16BIT:
10950 switch (cbValue)
10951 {
10952 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10953 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10954 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10955 default:
10956 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10957 }
10958 break;
10959
10960 case IEMMODE_32BIT:
10961 switch (cbValue)
10962 {
10963 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10964 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10965 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10966 default:
10967 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10968 }
10969 break;
10970
10971 case IEMMODE_64BIT:
10972 switch (cbValue)
10973 {
10974 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10975 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10976 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10977 default:
10978 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10979 }
10980 break;
10981
10982 default:
10983 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10984 }
10985 }
10986
10987 if ( pVCpu->iem.s.cActiveMappings == 0
10988 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10989 { /* likely */ }
10990 else
10991 {
10992 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10993 iemMemRollback(pVCpu);
10994 }
10995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10996}
10997
10998
10999/**
11000 * Interface for raw-mode to execute an OUT (write) instruction.
11001 *
11002 * @returns Strict VBox status code.
11003 * @param pVCpu The cross context virtual CPU structure.
11004 * @param cbInstr The instruction length in bytes.
11005 * @param u16Port The port to write to.
11006 * @param fImm Whether the port is specified using an immediate operand or
11007 * using the implicit DX register.
11008 * @param cbReg The register size.
11009 *
11010 * @remarks In ring-0 not all of the state needs to be synced in.
11011 */
11012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11013{
11014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11015 Assert(cbReg <= 4 && cbReg != 3);
11016
11017 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11018 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11019 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11020 Assert(!pVCpu->iem.s.cActiveMappings);
11021 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11022}
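/*
 * A minimal usage sketch for a hypothetical 'out dx, al' intercept handler;
 * u16Port and cbInstr come from the exit information, fImm is false because
 * the port is taken from DX, and cbReg is 1 for the byte-sized access:
 *
 *      rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false, 1);
 */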
11023
11024
11025/**
11026 * Interface for raw-mode to execute an IN (read) instruction.
11027 *
11028 * @returns Strict VBox status code.
11029 * @param pVCpu The cross context virtual CPU structure.
11030 * @param cbInstr The instruction length in bytes.
11031 * @param u16Port The port to read.
11032 * @param fImm Whether the port is specified using an immediate operand or
11033 * using the implicit DX register.
11034 * @param cbReg The register size.
11035 */
11036VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11037{
11038 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11039 Assert(cbReg <= 4 && cbReg != 3);
11040
11041 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11042 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11043 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11044 Assert(!pVCpu->iem.s.cActiveMappings);
11045 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11046}
11047
11048
11049/**
11050 * Interface for HM and EM to write to a CRx register.
11051 *
11052 * @returns Strict VBox status code.
11053 * @param pVCpu The cross context virtual CPU structure.
11054 * @param cbInstr The instruction length in bytes.
11055 * @param iCrReg The control register number (destination).
11056 * @param iGReg The general purpose register number (source).
11057 *
11058 * @remarks In ring-0 not all of the state needs to be synced in.
11059 */
11060VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11061{
11062 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11063 Assert(iCrReg < 16);
11064 Assert(iGReg < 16);
11065
11066 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11067 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11068 Assert(!pVCpu->iem.s.cActiveMappings);
11069 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11070}
11071
11072
11073/**
11074 * Interface for HM and EM to read from a CRx register.
11075 *
11076 * @returns Strict VBox status code.
11077 * @param pVCpu The cross context virtual CPU structure.
11078 * @param cbInstr The instruction length in bytes.
11079 * @param iGReg The general purpose register number (destination).
11080 * @param iCrReg The control register number (source).
11081 *
11082 * @remarks In ring-0 not all of the state needs to be synced in.
11083 */
11084VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11085{
11086 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11088 | CPUMCTX_EXTRN_APIC_TPR);
11089 Assert(iCrReg < 16);
11090 Assert(iGReg < 16);
11091
11092 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11093 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11094 Assert(!pVCpu->iem.s.cActiveMappings);
11095 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11096}
11097
11098
11099/**
11100 * Interface for HM and EM to write to a DRx register.
11101 *
11102 * @returns Strict VBox status code.
11103 * @param pVCpu The cross context virtual CPU structure.
11104 * @param cbInstr The instruction length in bytes.
11105 * @param iDrReg The debug register number (destination).
11106 * @param iGReg The general purpose register number (source).
11107 *
11108 * @remarks In ring-0 not all of the state needs to be synced in.
11109 */
11110VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11111{
11112 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11114 Assert(iDrReg < 8);
11115 Assert(iGReg < 16);
11116
11117 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11118 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11119 Assert(!pVCpu->iem.s.cActiveMappings);
11120 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11121}
11122
11123
11124/**
11125 * Interface for HM and EM to read from a DRx register.
11126 *
11127 * @returns Strict VBox status code.
11128 * @param pVCpu The cross context virtual CPU structure.
11129 * @param cbInstr The instruction length in bytes.
11130 * @param iGReg The general purpose register number (destination).
11131 * @param iDrReg The debug register number (source).
11132 *
11133 * @remarks In ring-0 not all of the state needs to be synced in.
11134 */
11135VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11136{
11137 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11138 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11139 Assert(iDrReg < 8);
11140 Assert(iGReg < 16);
11141
11142 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11143 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11144 Assert(!pVCpu->iem.s.cActiveMappings);
11145 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11146}
11147
11148
11149/**
11150 * Interface for HM and EM to clear the CR0[TS] bit.
11151 *
11152 * @returns Strict VBox status code.
11153 * @param pVCpu The cross context virtual CPU structure.
11154 * @param cbInstr The instruction length in bytes.
11155 *
11156 * @remarks In ring-0 not all of the state needs to be synced in.
11157 */
11158VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11159{
11160 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11161
11162 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11163 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11164 Assert(!pVCpu->iem.s.cActiveMappings);
11165 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11166}
11167
11168
11169/**
11170 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11171 *
11172 * @returns Strict VBox status code.
11173 * @param pVCpu The cross context virtual CPU structure.
11174 * @param cbInstr The instruction length in bytes.
11175 * @param uValue The value to load into CR0.
11176 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11177 * memory operand. Otherwise pass NIL_RTGCPTR.
11178 *
11179 * @remarks In ring-0 not all of the state needs to be synced in.
11180 */
11181VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11182{
11183 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11184
11185 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11186 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11187 Assert(!pVCpu->iem.s.cActiveMappings);
11188 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11189}
11190
11191
11192/**
11193 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11194 *
11195 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11196 *
11197 * @returns Strict VBox status code.
11198 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11199 * @param cbInstr The instruction length in bytes.
11200 * @remarks In ring-0 not all of the state needs to be synced in.
11201 * @thread EMT(pVCpu)
11202 */
11203VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11204{
11205 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11206
11207 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11208 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11209 Assert(!pVCpu->iem.s.cActiveMappings);
11210 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11211}
11212
11213
11214/**
11215 * Interface for HM and EM to emulate the WBINVD instruction.
11216 *
11217 * @returns Strict VBox status code.
11218 * @param pVCpu The cross context virtual CPU structure.
11219 * @param cbInstr The instruction length in bytes.
11220 *
11221 * @remarks In ring-0 not all of the state needs to be synced in.
11222 */
11223VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11224{
11225 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11226
11227 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11228 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11229 Assert(!pVCpu->iem.s.cActiveMappings);
11230 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11231}
11232
11233
11234/**
11235 * Interface for HM and EM to emulate the INVD instruction.
11236 *
11237 * @returns Strict VBox status code.
11238 * @param pVCpu The cross context virtual CPU structure.
11239 * @param cbInstr The instruction length in bytes.
11240 *
11241 * @remarks In ring-0 not all of the state needs to be synced in.
11242 */
11243VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11244{
11245 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11246
11247 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11248 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11249 Assert(!pVCpu->iem.s.cActiveMappings);
11250 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11251}
11252
11253
11254/**
11255 * Interface for HM and EM to emulate the INVLPG instruction.
11256 *
11257 * @returns Strict VBox status code.
11258 * @retval VINF_PGM_SYNC_CR3
11259 *
11260 * @param pVCpu The cross context virtual CPU structure.
11261 * @param cbInstr The instruction length in bytes.
11262 * @param GCPtrPage The effective address of the page to invalidate.
11263 *
11264 * @remarks In ring-0 not all of the state needs to be synced in.
11265 */
11266VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11267{
11268 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11269
11270 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11271 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11272 Assert(!pVCpu->iem.s.cActiveMappings);
11273 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11274}
11275
11276
11277/**
11278 * Interface for HM and EM to emulate the INVPCID instruction.
11279 *
11280 * @returns Strict VBox status code.
11281 * @retval VINF_PGM_SYNC_CR3
11282 *
11283 * @param pVCpu The cross context virtual CPU structure.
11284 * @param cbInstr The instruction length in bytes.
11285 * @param iEffSeg The effective segment register.
11286 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11287 * @param uType The invalidation type.
11288 *
11289 * @remarks In ring-0 not all of the state needs to be synced in.
11290 */
11291VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11292 uint64_t uType)
11293{
11294 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11295
11296 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11297 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11298 Assert(!pVCpu->iem.s.cActiveMappings);
11299 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11300}
11301
11302
11303/**
11304 * Interface for HM and EM to emulate the CPUID instruction.
11305 *
11306 * @returns Strict VBox status code.
11307 *
11308 * @param pVCpu The cross context virtual CPU structure.
11309 * @param cbInstr The instruction length in bytes.
11310 *
11311 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11312 */
11313VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11314{
11315 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11316 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11317
11318 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11319 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11320 Assert(!pVCpu->iem.s.cActiveMappings);
11321 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11322}
11323
11324
11325/**
11326 * Interface for HM and EM to emulate the RDPMC instruction.
11327 *
11328 * @returns Strict VBox status code.
11329 *
11330 * @param pVCpu The cross context virtual CPU structure.
11331 * @param cbInstr The instruction length in bytes.
11332 *
11333 * @remarks Not all of the state needs to be synced in.
11334 */
11335VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11336{
11337 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11338 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11339
11340 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11341 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11342 Assert(!pVCpu->iem.s.cActiveMappings);
11343 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11344}
11345
11346
11347/**
11348 * Interface for HM and EM to emulate the RDTSC instruction.
11349 *
11350 * @returns Strict VBox status code.
11351 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11352 *
11353 * @param pVCpu The cross context virtual CPU structure.
11354 * @param cbInstr The instruction length in bytes.
11355 *
11356 * @remarks Not all of the state needs to be synced in.
11357 */
11358VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11359{
11360 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11361 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11362
11363 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11365 Assert(!pVCpu->iem.s.cActiveMappings);
11366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11367}
11368
11369
11370/**
11371 * Interface for HM and EM to emulate the RDTSCP instruction.
11372 *
11373 * @returns Strict VBox status code.
11374 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11375 *
11376 * @param pVCpu The cross context virtual CPU structure.
11377 * @param cbInstr The instruction length in bytes.
11378 *
11379 * @remarks Not all of the state needs to be synced in. Recommended
11380 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11381 */
11382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11383{
11384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11385 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11386
11387 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11388 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11389 Assert(!pVCpu->iem.s.cActiveMappings);
11390 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11391}
11392
11393
11394/**
11395 * Interface for HM and EM to emulate the RDMSR instruction.
11396 *
11397 * @returns Strict VBox status code.
11398 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11399 *
11400 * @param pVCpu The cross context virtual CPU structure.
11401 * @param cbInstr The instruction length in bytes.
11402 *
11403 * @remarks Not all of the state needs to be synced in. Requires RCX and
11404 * (currently) all MSRs.
11405 */
11406VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11407{
11408 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11409 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11410
11411 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11412 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11413 Assert(!pVCpu->iem.s.cActiveMappings);
11414 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11415}
11416
11417
11418/**
11419 * Interface for HM and EM to emulate the WRMSR instruction.
11420 *
11421 * @returns Strict VBox status code.
11422 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11423 *
11424 * @param pVCpu The cross context virtual CPU structure.
11425 * @param cbInstr The instruction length in bytes.
11426 *
11427 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11428 * and (currently) all MSRs.
11429 */
11430VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11431{
11432 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11433 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11434 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11435
11436 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11437 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11438 Assert(!pVCpu->iem.s.cActiveMappings);
11439 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11440}
11441
11442
11443/**
11444 * Interface for HM and EM to emulate the MONITOR instruction.
11445 *
11446 * @returns Strict VBox status code.
11447 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11448 *
11449 * @param pVCpu The cross context virtual CPU structure.
11450 * @param cbInstr The instruction length in bytes.
11451 *
11452 * @remarks Not all of the state needs to be synced in.
11453 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11454 * are used.
11455 */
11456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11457{
11458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11459 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11460
11461 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11463 Assert(!pVCpu->iem.s.cActiveMappings);
11464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11465}
11466
11467
11468/**
11469 * Interface for HM and EM to emulate the MWAIT instruction.
11470 *
11471 * @returns Strict VBox status code.
11472 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11473 *
11474 * @param pVCpu The cross context virtual CPU structure.
11475 * @param cbInstr The instruction length in bytes.
11476 *
11477 * @remarks Not all of the state needs to be synced in.
11478 */
11479VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11480{
11481 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11482 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11483
11484 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11485 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11486 Assert(!pVCpu->iem.s.cActiveMappings);
11487 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11488}
11489
11490
11491/**
11492 * Interface for HM and EM to emulate the HLT instruction.
11493 *
11494 * @returns Strict VBox status code.
11495 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11496 *
11497 * @param pVCpu The cross context virtual CPU structure.
11498 * @param cbInstr The instruction length in bytes.
11499 *
11500 * @remarks Not all of the state needs to be synced in.
11501 */
11502VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11503{
11504 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11505
11506 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11507 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11508 Assert(!pVCpu->iem.s.cActiveMappings);
11509 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11510}
11511
11512
11513/**
11514 * Checks if IEM is in the process of delivering an event (interrupt or
11515 * exception).
11516 *
11517 * @returns true if we're in the process of raising an interrupt or exception,
11518 * false otherwise.
11519 * @param pVCpu The cross context virtual CPU structure.
11520 * @param puVector Where to store the vector associated with the
11521 * currently delivered event, optional.
11522 * @param pfFlags Where to store the event delivery flags (see
11523 * IEM_XCPT_FLAGS_XXX), optional.
11524 * @param puErr Where to store the error code associated with the
11525 * event, optional.
11526 * @param puCr2 Where to store the CR2 associated with the event,
11527 * optional.
11528 * @remarks The caller should check the flags to determine if the error code and
11529 * CR2 are valid for the event.
11530 */
11531VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11532{
11533 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11534 if (fRaisingXcpt)
11535 {
11536 if (puVector)
11537 *puVector = pVCpu->iem.s.uCurXcpt;
11538 if (pfFlags)
11539 *pfFlags = pVCpu->iem.s.fCurXcpt;
11540 if (puErr)
11541 *puErr = pVCpu->iem.s.uCurXcptErr;
11542 if (puCr2)
11543 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11544 }
11545 return fRaisingXcpt;
11546}
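/*
 * A minimal usage sketch: check whether IEM is in the middle of delivering an
 * event and log the vector if so (the variable names are illustrative):
 *
 *      uint8_t  uVector;
 *      uint32_t fXcptFlags, uXcptErr;
 *      uint64_t uXcptCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fXcptFlags, &uXcptErr, &uXcptCr2))
 *          Log(("Currently delivering vector %#x (flags %#x)\n", uVector, fXcptFlags));
 */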
11547
11548#ifdef IN_RING3
11549
11550/**
11551 * Handles the unlikely and probably fatal merge cases.
11552 *
11553 * @returns Merged status code.
11554 * @param rcStrict Current EM status code.
11555 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11556 * with @a rcStrict.
11557 * @param iMemMap The memory mapping index. For error reporting only.
11558 * @param pVCpu The cross context virtual CPU structure of the calling
11559 * thread, for error reporting only.
11560 */
11561DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11562 unsigned iMemMap, PVMCPUCC pVCpu)
11563{
11564 if (RT_FAILURE_NP(rcStrict))
11565 return rcStrict;
11566
11567 if (RT_FAILURE_NP(rcStrictCommit))
11568 return rcStrictCommit;
11569
11570 if (rcStrict == rcStrictCommit)
11571 return rcStrictCommit;
11572
11573 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11574 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11575 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11576 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11577 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11578 return VERR_IOM_FF_STATUS_IPE;
11579}
11580
11581
11582/**
11583 * Helper for IOMR3ProcessForceFlag.
11584 *
11585 * @returns Merged status code.
11586 * @param rcStrict Current EM status code.
11587 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11588 * with @a rcStrict.
11589 * @param iMemMap The memory mapping index. For error reporting only.
11590 * @param pVCpu The cross context virtual CPU structure of the calling
11591 * thread, for error reporting only.
11592 */
11593DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11594{
11595 /* Simple. */
11596 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11597 return rcStrictCommit;
11598
11599 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11600 return rcStrict;
11601
11602 /* EM scheduling status codes. */
11603 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11604 && rcStrict <= VINF_EM_LAST))
11605 {
11606 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11607 && rcStrictCommit <= VINF_EM_LAST))
11608 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11609 }
11610
11611 /* Unlikely */
11612 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11613}
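/*
 * Worked examples for the helper above, following directly from the code:
 *      iemR3MergeStatus(VINF_SUCCESS,      rcCommit,     iMemMap, pVCpu) -> rcCommit
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, rcCommit,     iMemMap, pVCpu) -> rcCommit
 *      iemR3MergeStatus(rcOther,           VINF_SUCCESS, iMemMap, pVCpu) -> rcOther
 * and when both codes are EM scheduling codes, the numerically smaller
 * (higher priority) one is returned.
 */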
11614
11615
11616/**
11617 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11618 *
11619 * @returns Merge between @a rcStrict and what the commit operation returned.
11620 * @param pVM The cross context VM structure.
11621 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11622 * @param rcStrict The status code returned by ring-0 or raw-mode.
11623 */
11624VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11625{
11626 /*
11627 * Reset the pending commit.
11628 */
11629 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11630 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11631 ("%#x %#x %#x\n",
11632 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11633 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11634
11635 /*
11636 * Commit the pending bounce buffers (usually just one).
11637 */
11638 unsigned cBufs = 0;
11639 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11640 while (iMemMap-- > 0)
11641 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11642 {
11643 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11644 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11645 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11646
11647 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11648 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11649 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11650
11651 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11652 {
11653 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11654 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11655 pbBuf,
11656 cbFirst,
11657 PGMACCESSORIGIN_IEM);
11658 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11659 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11660 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11661 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11662 }
11663
11664 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11665 {
11666 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11667 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11668 pbBuf + cbFirst,
11669 cbSecond,
11670 PGMACCESSORIGIN_IEM);
11671 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11672 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11673 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11674 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11675 }
11676 cBufs++;
11677 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11678 }
11679
11680 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11681 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11682 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11683 pVCpu->iem.s.cActiveMappings = 0;
11684 return rcStrict;
11685}
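/*
 * A minimal sketch of the expected ring-3 call pattern when the force flag is
 * noticed after returning from guest execution (the flag check and the call
 * follow the interface contract above; the surrounding code is hypothetical):
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */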
11686
11687#endif /* IN_RING3 */
11688