1/* $Id: IEMAll.cpp 103886 2024-03-18 10:21:04Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with the speed goal, as the disassembler chews things a bit too much
47 * and leaves us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
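/* Illustrative sketch (not part of the build): how the level/group assignments
   described above are typically exercised with the standard VBox logging
   macros; the variables below are made-up stand-ins. */
#if 0
static void iemSketchLogging(uint16_t uSel, uint64_t uRip, uint32_t fExec, RTGCPTR GCPtrPage)
{
    Log(("Raising #GP(0) at %04x:%RX64\n", uSel, uRip));       /* level 1:  errors, exceptions, interrupts */
    LogFlow(("IEMExecOne: fExec=%#x\n", fExec));                /* flow:     basic enter/exit state info    */
    Log4(("decode: %04x:%RX64 xor eax, eax\n", uSel, uRip));    /* level 4:  decoded mnemonics w/ EIP       */
    Log10(("TLB miss for %RGv\n", GCPtrPage));                  /* level 10: TLB activity                   */
}
#endif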
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
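/* Illustrative sketch (not part of the build): how a caller might consume the
   IEM_F_PENDING_BRK_XXX bits computed above, assuming the inline wrapper
   iemCalcExecDbgFlags mentioned in the note takes the same pVCpu argument. */
#if 0
static void iemSketchPendingBrk(PVMCPUCC pVCpu)
{
    uint32_t const fDbg = iemCalcExecDbgFlags(pVCpu);
    if (!(fDbg & (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)))
    { /* Fast path: no armed guest or hypervisor breakpoints to worry about. */ }
    else
    { /* Slow path: per-instruction, data and I/O breakpoint checks are required. */ }
}
#endif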
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
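/* Illustrative sketch (not part of the build): the operand-size defaults chosen
   above follow the x86 rule that 64-bit code has a 32-bit default operand size
   (changed by REX.W or the 0x66 prefix), while 16-bit and 32-bit code default
   to their own width. */
#if 0
static IEMMODE iemSketchDefOpSize(IEMMODE enmCpuMode)
{
    return enmCpuMode == IEMMODE_64BIT ? IEMMODE_32BIT : enmCpuMode;
}
#endif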
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
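/* Illustrative sketch (not part of the build): a worked example of the
   instruction-buffer reuse check above, using made-up numbers. */
#if 0
static void iemSketchBufReuse(void)
{
    uint64_t const uInstrBufPc     = UINT64_C(0x1000);      /* linear address the buffer was loaded for */
    uint16_t const cbInstrBufTotal = 0x1000;                /* it covers 0x1000..0x1fff                 */
    uint64_t const uNewPc          = UINT64_C(0x1ffa);      /* new RIP                                  */
    uint64_t const off             = uNewPc - uInstrBufPc;  /* 0xffa - still inside the buffer          */
    uint16_t const cbInstrBuf      = (uint16_t)off + 15 <= cbInstrBufTotal
                                   ? (uint16_t)(off + 15)   /* room for a full 15 byte instruction      */
                                   : cbInstrBufTotal;       /* 0x1000 here: clamp to the buffer end     */
    RT_NOREF(cbInstrBuf);
}
#endif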
455
456
457
458/**
459 * Prefetch opcodes the first time when starting executing.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
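/* Illustrative sketch (not part of the build): the two clamps applied to the
   prefetch size above - first to the end of the guest page, then to the size
   of the opcode buffer.  The 4 KiB page size here is a stand-in for
   GUEST_PAGE_SIZE and cbOpcodeBuf for sizeof(abOpcode). */
#if 0
static uint32_t iemSketchClampPrefetch(uint64_t GCPtrPC, uint32_t cbToTryRead, uint32_t cbOpcodeBuf)
{
    uint32_t const cbLeftOnPage = 0x1000 - (uint32_t)(GCPtrPC & 0xfff);
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;     /* never read past the page we just translated  */
    if (cbToTryRead > cbOpcodeBuf)
        cbToTryRead = cbOpcodeBuf;      /* never read more than the opcode buffer holds */
    return cbToTryRead;
}
#endif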
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
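/* Illustrative sketch (not part of the build): the tag scheme behind the two
   lookups above.  The shift and masking are stand-ins for what
   IEMTLB_CALC_TAG_NO_REV / IEMTLB_TAG_TO_INDEX do; the key point is that the
   stored tag has the current uTlbRevision folded in, so bumping the revision
   (as IEMTlbInvalidateAll does) invalidates every entry without touching it. */
#if 0
static bool iemSketchTlbHit(uint64_t GCPtr, uint64_t uTlbRevision, uint64_t const *pauTags, uintptr_t cEntries)
{
    uint64_t  const uTagNoRev = GCPtr >> 12;                            /* page number, no revision bits */
    uintptr_t const idx       = (uintptr_t)uTagNoRev & (cEntries - 1);  /* direct mapped index           */
    return pauTags[idx] == (uTagNoRev | uTlbRevision);                  /* revision is part of the tag   */
}
#endif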
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a revision rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
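/* Illustrative sketch (not part of the build): the physical revision applies
   the same trick to the host-mapping half of a TLB entry.  Each entry carries
   the uTlbPhysRev that was current when its pbMappingR3/GCPhys info was filled
   in, so bumping the revision above makes that info stale for all entries at
   once; the check below mirrors the comparisons done later in
   iemOpcodeFetchBytesJmp. */
#if 0
static bool iemSketchPhysInfoValid(uint64_t fFlagsAndPhysRev, uint64_t fPhysRevMask, uint64_t uTlbPhysRev)
{
    return (fFlagsAndPhysRev & fPhysRevMask) == uTlbPhysRev;
}
#endif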
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
831 * failure and jumping.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
861 if (pbInstrBuf != NULL)
862 {
863 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
864 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
865 if (offBuf < cbInstrBuf)
866 {
867 Assert(offBuf + cbDst > cbInstrBuf);
868 uint32_t const cbCopy = cbInstrBuf - offBuf;
869 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
870
871 cbDst -= cbCopy;
872 pvDst = (uint8_t *)pvDst + cbCopy;
873 offBuf += cbCopy;
874 }
875 }
876
877 /*
878 * Check segment limit, figuring how much we're allowed to access at this point.
879 *
880 * We will fault immediately if RIP is past the segment limit / in non-canonical
881 * territory. If we do continue, there are one or more bytes to read before we
882 * end up in trouble and we need to do that first before faulting.
883 */
884 RTGCPTR GCPtrFirst;
885 uint32_t cbMaxRead;
886 if (IEM_IS_64BIT_CODE(pVCpu))
887 {
888 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
889 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
890 { /* likely */ }
891 else
892 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
893 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
894 }
895 else
896 {
897 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
898 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
899 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
900 { /* likely */ }
901 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
902 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
903 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
904 if (cbMaxRead != 0)
905 { /* likely */ }
906 else
907 {
908 /* Overflowed because address is 0 and limit is max. */
909 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
910 cbMaxRead = X86_PAGE_SIZE;
911 }
912 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
913 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
914 if (cbMaxRead2 < cbMaxRead)
915 cbMaxRead = cbMaxRead2;
916 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
917 }
918
919 /*
920 * Get the TLB entry for this piece of code.
921 */
922 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
923 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
924 if (pTlbe->uTag == uTag)
925 {
926 /* likely when executing lots of code, otherwise unlikely */
927# ifdef VBOX_WITH_STATISTICS
928 pVCpu->iem.s.CodeTlb.cTlbHits++;
929# endif
930 }
931 else
932 {
933 pVCpu->iem.s.CodeTlb.cTlbMisses++;
934 PGMPTWALK Walk;
935 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
936 if (RT_FAILURE(rc))
937 {
938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
939 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
940 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
941#endif
942 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
944 }
945
946 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
947 Assert(Walk.fSucceeded);
948 pTlbe->uTag = uTag;
949 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
950 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
951 pTlbe->GCPhys = Walk.GCPhys;
952 pTlbe->pbMappingR3 = NULL;
953 }
954
955 /*
956 * Check TLB page table level access flags.
957 */
958 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
959 {
960 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
961 {
962 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
963 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
964 }
965 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
966 {
967 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
968 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
969 }
970 }
971
972 /*
973 * Set the accessed flags.
974 * ASSUMES this is set when the address is translated rather than on commit...
975 */
976 /** @todo testcase: check when the A bit are actually set by the CPU for code. */
977 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
978 {
979 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
980 AssertRC(rc2);
981 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
982 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
983 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
984 }
985
986 /*
987 * Look up the physical page info if necessary.
988 */
989 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
990 { /* not necessary */ }
991 else
992 {
993 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
995 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
996 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
997 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
998 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
999 { /* likely */ }
1000 else
1001 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1002 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1003 | IEMTLBE_F_NO_MAPPINGR3
1004 | IEMTLBE_F_PG_NO_READ
1005 | IEMTLBE_F_PG_NO_WRITE
1006 | IEMTLBE_F_PG_UNASSIGNED
1007 | IEMTLBE_F_PG_CODE_PAGE);
1008 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1009 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1010 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1011 }
1012
1013# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1014 /*
1015 * Try do a direct read using the pbMappingR3 pointer.
1016 */
1017 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1018 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1019 {
1020 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1021 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1022 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1023 {
1024 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1025 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1026 }
1027 else
1028 {
1029 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1030 if (cbInstr + (uint32_t)cbDst <= 15)
1031 {
1032 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1033 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1034 }
1035 else
1036 {
1037 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1038 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1039 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1040 }
1041 }
1042 if (cbDst <= cbMaxRead)
1043 {
1044 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1045 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1046
1047 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1048 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1049 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1050 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1051 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1052 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1053 else
1054 Assert(!pvDst);
1055 return;
1056 }
1057 pVCpu->iem.s.pbInstrBuf = NULL;
1058
1059 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1060 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1061 }
1062# else
1063# error "refactor as needed"
1064 /*
1065 * If there is no special read handling, we can read a bit more and
1066 * put it in the prefetch buffer.
1067 */
1068 if ( cbDst < cbMaxRead
1069 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1070 {
1071 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1072 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1073 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1074 { /* likely */ }
1075 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1076 {
1077 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1078 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1080 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1081 }
1082 else
1083 {
1084 Log((RT_SUCCESS(rcStrict)
1085 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1086 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1087 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1088 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1089 }
1090 }
1091# endif
1092 /*
1093 * Special read handling, so only read exactly what's needed.
1094 * This is a highly unlikely scenario.
1095 */
1096 else
1097 {
1098 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1099
1100 /* Check instruction length. */
1101 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1102 if (RT_LIKELY(cbInstr + cbDst <= 15))
1103 { /* likely */ }
1104 else
1105 {
1106 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1107 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1108 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1109 }
1110
1111 /* Do the reading. */
1112 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1113 if (cbToRead > 0)
1114 {
1115 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1116 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1117 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1118 { /* likely */ }
1119 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1120 {
1121 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1122 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1123 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1124 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1125 }
1126 else
1127 {
1128 Log((RT_SUCCESS(rcStrict)
1129 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1130 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1131 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1132 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1133 }
1134 }
1135
1136 /* Update the state and probably return. */
1137 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1138 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1139 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1140
1141 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1142 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1143 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1144 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1145 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1146 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1147 pVCpu->iem.s.pbInstrBuf = NULL;
1148 if (cbToRead == cbDst)
1149 return;
1150 Assert(cbToRead == cbMaxRead);
1151 }
1152
1153 /*
1154 * More to read, loop.
1155 */
1156 cbDst -= cbMaxRead;
1157 pvDst = (uint8_t *)pvDst + cbMaxRead;
1158 }
1159# else /* !IN_RING3 */
1160 RT_NOREF(pvDst, cbDst);
1161 if (pvDst || cbDst)
1162 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1163# endif /* !IN_RING3 */
1164}
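/* Illustrative sketch (not part of the build): the #GP(0) checks above enforce
   the architectural 15 byte maximum instruction length.  E.g. with 12 bytes
   already decoded (cbInstr) a further 4 byte fetch (cbDst) gives 16 > 15 and
   must fault. */
#if 0
static bool iemSketchFitsMaxInstrLen(uint32_t cbInstr, size_t cbDst)
{
    return cbInstr + (uint32_t)cbDst <= 15;
}
#endif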
1165
1166#else /* !IEM_WITH_CODE_TLB */
1167
1168/**
1169 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1170 * exception if it fails.
1171 *
1172 * @returns Strict VBox status code.
1173 * @param pVCpu The cross context virtual CPU structure of the
1174 * calling thread.
1175 * @param cbMin The minimum number of bytes relative to offOpcode
1176 * that must be read.
1177 */
1178VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1179{
1180 /*
1181 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1182 *
1183 * First translate CS:rIP to a physical address.
1184 */
1185 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1186 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1187 uint8_t const cbLeft = cbOpcode - offOpcode;
1188 Assert(cbLeft < cbMin);
1189 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1190
1191 uint32_t cbToTryRead;
1192 RTGCPTR GCPtrNext;
1193 if (IEM_IS_64BIT_CODE(pVCpu))
1194 {
1195 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1196 if (!IEM_IS_CANONICAL(GCPtrNext))
1197 return iemRaiseGeneralProtectionFault0(pVCpu);
1198 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1199 }
1200 else
1201 {
1202 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1203 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1204 GCPtrNext32 += cbOpcode;
1205 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1206 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1207 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1208 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1209 if (!cbToTryRead) /* overflowed */
1210 {
1211 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1212 cbToTryRead = UINT32_MAX;
1213 /** @todo check out wrapping around the code segment. */
1214 }
1215 if (cbToTryRead < cbMin - cbLeft)
1216 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1217 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1218
1219 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1220 if (cbToTryRead > cbLeftOnPage)
1221 cbToTryRead = cbLeftOnPage;
1222 }
1223
1224 /* Restrict to opcode buffer space.
1225
1226 We're making ASSUMPTIONS here based on work done previously in
1227 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1228 be fetched in case of an instruction crossing two pages. */
1229 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1230 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1231 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1232 { /* likely */ }
1233 else
1234 {
1235 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1236 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1237 return iemRaiseGeneralProtectionFault0(pVCpu);
1238 }
1239
1240 PGMPTWALK Walk;
1241 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1242 if (RT_FAILURE(rc))
1243 {
1244 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1245#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1246 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1247 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1248#endif
1249 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1250 }
1251 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1252 {
1253 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1254#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1255 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1256 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1257#endif
1258 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1259 }
1260 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1261 {
1262 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1263#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1264 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1265 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1266#endif
1267 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1268 }
1269 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1270 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1271 /** @todo Check reserved bits and such stuff. PGM is better at doing
1272 * that, so do it when implementing the guest virtual address
1273 * TLB... */
1274
1275 /*
1276 * Read the bytes at this address.
1277 *
1278 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1279 * and since PATM should only patch the start of an instruction there
1280 * should be no need to check again here.
1281 */
1282 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1283 {
1284 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1285 cbToTryRead, PGMACCESSORIGIN_IEM);
1286 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1287 { /* likely */ }
1288 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1289 {
1290 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1291 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1292 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1293 }
1294 else
1295 {
1296 Log((RT_SUCCESS(rcStrict)
1297 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1298 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1299 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1300 return rcStrict;
1301 }
1302 }
1303 else
1304 {
1305 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1306 if (RT_SUCCESS(rc))
1307 { /* likely */ }
1308 else
1309 {
1310 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1311 return rc;
1312 }
1313 }
1314 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1315 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1316
1317 return VINF_SUCCESS;
1318}
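/* Illustrative sketch (not part of the build): the bookkeeping above worked
   through with made-up numbers. */
#if 0
static void iemSketchOpcodeBookkeeping(void)
{
    uint8_t const cbOpcode  = 3;                     /* bytes already in abOpcode                */
    uint8_t const offOpcode = 1;                     /* bytes already consumed by the decoder    */
    uint8_t const cbLeft    = cbOpcode - offOpcode;  /* 2 unread bytes                           */
    size_t  const cbMin     = 4;                     /* the decoder needs 4 unread bytes         */
    size_t  const cbNeeded  = cbMin - cbLeft;        /* so at least 2 more bytes must be fetched */
    /* The function above appends what it reads at abOpcode[cbOpcode] and then
       advances cbOpcode by the number of bytes actually read. */
    RT_NOREF(cbNeeded);
}
#endif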
1319
1320#endif /* !IEM_WITH_CODE_TLB */
1321#ifndef IEM_WITH_SETJMP
1322
1323/**
1324 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1325 *
1326 * @returns Strict VBox status code.
1327 * @param pVCpu The cross context virtual CPU structure of the
1328 * calling thread.
1329 * @param pb Where to return the opcode byte.
1330 */
1331VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1332{
1333 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1334 if (rcStrict == VINF_SUCCESS)
1335 {
1336 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1337 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1338 pVCpu->iem.s.offOpcode = offOpcode + 1;
1339 }
1340 else
1341 *pb = 0;
1342 return rcStrict;
1343}
1344
1345#else /* IEM_WITH_SETJMP */
1346
1347/**
1348 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1349 *
1350 * @returns The opcode byte.
1351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1352 */
1353uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1354{
1355# ifdef IEM_WITH_CODE_TLB
1356 uint8_t u8;
1357 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1358 return u8;
1359# else
1360 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1361 if (rcStrict == VINF_SUCCESS)
1362 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1363 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1364# endif
1365}
1366
1367#endif /* IEM_WITH_SETJMP */
1368
1369#ifndef IEM_WITH_SETJMP
1370
1371/**
1372 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1376 * @param pu16 Where to return the opcode dword.
1377 */
1378VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1379{
1380 uint8_t u8;
1381 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1382 if (rcStrict == VINF_SUCCESS)
1383 *pu16 = (int8_t)u8;
1384 return rcStrict;
1385}
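/* Illustrative sketch (not part of the build): the (int8_t) cast above is what
   performs the S8 -> U16 sign extension.  Worked example: 0xfe as int8_t is -2,
   which widens to 0xfffe in a uint16_t, whereas zero extension would give 0x00fe. */
#if 0
static void iemSketchSignExtend(void)
{
    uint8_t  const u8   = 0xfe;
    uint16_t const u16S = (uint16_t)(int8_t)u8;     /* sign-extended: 0xfffe */
    uint16_t const u16Z = u8;                       /* zero-extended: 0x00fe */
    RT_NOREF(u16S); RT_NOREF(u16Z);
}
#endif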
1386
1387
1388/**
1389 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1390 *
1391 * @returns Strict VBox status code.
1392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1393 * @param pu32 Where to return the opcode dword.
1394 */
1395VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1396{
1397 uint8_t u8;
1398 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1399 if (rcStrict == VINF_SUCCESS)
1400 *pu32 = (int8_t)u8;
1401 return rcStrict;
1402}
1403
1404
1405/**
1406 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1407 *
1408 * @returns Strict VBox status code.
1409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1410 * @param pu64 Where to return the opcode qword.
1411 */
1412VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1413{
1414 uint8_t u8;
1415 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1416 if (rcStrict == VINF_SUCCESS)
1417 *pu64 = (int8_t)u8;
1418 return rcStrict;
1419}
1420
1421#endif /* !IEM_WITH_SETJMP */
1422
1423
1424#ifndef IEM_WITH_SETJMP
1425
1426/**
1427 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1428 *
1429 * @returns Strict VBox status code.
1430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1431 * @param pu16 Where to return the opcode word.
1432 */
1433VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1434{
1435 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1436 if (rcStrict == VINF_SUCCESS)
1437 {
1438 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1439# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1440 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1441# else
1442 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1443# endif
1444 pVCpu->iem.s.offOpcode = offOpcode + 2;
1445 }
1446 else
1447 *pu16 = 0;
1448 return rcStrict;
1449}
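/* Illustrative sketch (not part of the build): both branches above assemble a
   little-endian 16-bit value from consecutive opcode bytes.  Worked example:
   bytes 0x34, 0x12 in stream order yield 0x1234, which is what both
   RT_MAKE_U16(lo, hi) and the unaligned uint16_t read produce on a
   little-endian host. */
#if 0
static uint16_t iemSketchLittleEndianU16(uint8_t const *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));  /* {0x34, 0x12} -> 0x1234 */
}
#endif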
1450
1451#else /* IEM_WITH_SETJMP */
1452
1453/**
1454 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1455 *
1456 * @returns The opcode word.
1457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1458 */
1459uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1460{
1461# ifdef IEM_WITH_CODE_TLB
1462 uint16_t u16;
1463 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1464 return u16;
1465# else
1466 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1467 if (rcStrict == VINF_SUCCESS)
1468 {
1469 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1470 pVCpu->iem.s.offOpcode += 2;
1471# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1472 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1473# else
1474 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1475# endif
1476 }
1477 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1478# endif
1479}
1480
1481#endif /* IEM_WITH_SETJMP */
1482
1483#ifndef IEM_WITH_SETJMP
1484
1485/**
1486 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1487 *
1488 * @returns Strict VBox status code.
1489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1490 * @param pu32 Where to return the opcode double word.
1491 */
1492VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1493{
1494 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1495 if (rcStrict == VINF_SUCCESS)
1496 {
1497 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1498 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1499 pVCpu->iem.s.offOpcode = offOpcode + 2;
1500 }
1501 else
1502 *pu32 = 0;
1503 return rcStrict;
1504}
1505
1506
1507/**
1508 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1509 *
1510 * @returns Strict VBox status code.
1511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1512 * @param pu64 Where to return the opcode quad word.
1513 */
1514VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1515{
1516 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1517 if (rcStrict == VINF_SUCCESS)
1518 {
1519 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1520 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1521 pVCpu->iem.s.offOpcode = offOpcode + 2;
1522 }
1523 else
1524 *pu64 = 0;
1525 return rcStrict;
1526}
1527
1528#endif /* !IEM_WITH_SETJMP */
1529
1530#ifndef IEM_WITH_SETJMP
1531
1532/**
1533 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1534 *
1535 * @returns Strict VBox status code.
1536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1537 * @param pu32 Where to return the opcode dword.
1538 */
1539VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1540{
1541 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1542 if (rcStrict == VINF_SUCCESS)
1543 {
1544 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1546 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1547# else
1548 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1549 pVCpu->iem.s.abOpcode[offOpcode + 1],
1550 pVCpu->iem.s.abOpcode[offOpcode + 2],
1551 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1552# endif
1553 pVCpu->iem.s.offOpcode = offOpcode + 4;
1554 }
1555 else
1556 *pu32 = 0;
1557 return rcStrict;
1558}
1559
1560#else /* IEM_WITH_SETJMP */
1561
1562/**
1563 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1564 *
1565 * @returns The opcode dword.
1566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1567 */
1568uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1569{
1570# ifdef IEM_WITH_CODE_TLB
1571 uint32_t u32;
1572 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1573 return u32;
1574# else
1575 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1576 if (rcStrict == VINF_SUCCESS)
1577 {
1578 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1579 pVCpu->iem.s.offOpcode = offOpcode + 4;
1580# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1581 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1582# else
1583 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1584 pVCpu->iem.s.abOpcode[offOpcode + 1],
1585 pVCpu->iem.s.abOpcode[offOpcode + 2],
1586 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1587# endif
1588 }
1589 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1590# endif
1591}
1592
1593#endif /* IEM_WITH_SETJMP */
1594
1595#ifndef IEM_WITH_SETJMP
1596
1597/**
1598 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1599 *
1600 * @returns Strict VBox status code.
1601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1602 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1603 */
1604VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1605{
1606 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1607 if (rcStrict == VINF_SUCCESS)
1608 {
1609 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1610 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1611 pVCpu->iem.s.abOpcode[offOpcode + 1],
1612 pVCpu->iem.s.abOpcode[offOpcode + 2],
1613 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1614 pVCpu->iem.s.offOpcode = offOpcode + 4;
1615 }
1616 else
1617 *pu64 = 0;
1618 return rcStrict;
1619}
1620
1621
1622/**
1623 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1624 *
1625 * @returns Strict VBox status code.
1626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1627 * @param pu64 Where to return the opcode qword.
1628 */
1629VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1630{
1631 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1632 if (rcStrict == VINF_SUCCESS)
1633 {
1634 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1635 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1636 pVCpu->iem.s.abOpcode[offOpcode + 1],
1637 pVCpu->iem.s.abOpcode[offOpcode + 2],
1638 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1639 pVCpu->iem.s.offOpcode = offOpcode + 4;
1640 }
1641 else
1642 *pu64 = 0;
1643 return rcStrict;
1644}
1645
1646#endif /* !IEM_WITH_SETJMP */
1647
1648#ifndef IEM_WITH_SETJMP
1649
1650/**
1651 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1652 *
1653 * @returns Strict VBox status code.
1654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1655 * @param pu64 Where to return the opcode qword.
1656 */
1657VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1658{
1659 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1660 if (rcStrict == VINF_SUCCESS)
1661 {
1662 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1663# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1664 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1665# else
1666 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1667 pVCpu->iem.s.abOpcode[offOpcode + 1],
1668 pVCpu->iem.s.abOpcode[offOpcode + 2],
1669 pVCpu->iem.s.abOpcode[offOpcode + 3],
1670 pVCpu->iem.s.abOpcode[offOpcode + 4],
1671 pVCpu->iem.s.abOpcode[offOpcode + 5],
1672 pVCpu->iem.s.abOpcode[offOpcode + 6],
1673 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1674# endif
1675 pVCpu->iem.s.offOpcode = offOpcode + 8;
1676 }
1677 else
1678 *pu64 = 0;
1679 return rcStrict;
1680}
1681
1682#else /* IEM_WITH_SETJMP */
1683
1684/**
1685 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1686 *
1687 * @returns The opcode qword.
1688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1689 */
1690uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1691{
1692# ifdef IEM_WITH_CODE_TLB
1693 uint64_t u64;
1694 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1695 return u64;
1696# else
1697 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1698 if (rcStrict == VINF_SUCCESS)
1699 {
1700 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1701 pVCpu->iem.s.offOpcode = offOpcode + 8;
1702# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1703 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1704# else
1705 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1706 pVCpu->iem.s.abOpcode[offOpcode + 1],
1707 pVCpu->iem.s.abOpcode[offOpcode + 2],
1708 pVCpu->iem.s.abOpcode[offOpcode + 3],
1709 pVCpu->iem.s.abOpcode[offOpcode + 4],
1710 pVCpu->iem.s.abOpcode[offOpcode + 5],
1711 pVCpu->iem.s.abOpcode[offOpcode + 6],
1712 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1713# endif
1714 }
1715 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1716# endif
1717}
1718
1719#endif /* IEM_WITH_SETJMP */
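/*
 * Illustrative sketch (not part of this file): the inline opcode getters in
 * the IEM headers are assumed to look roughly like the following, deferring
 * to the *Slow / *SlowJmp workers above only when the prefetched opcode
 * buffer runs out of bytes.  The wrapper name and the cbOpcode field used
 * here are assumptions for illustration only.
 *
 *    DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
 *    {
 *        uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
 *        if (RT_LIKELY((uint8_t)(offOpcode + 2) <= pVCpu->iem.s.cbOpcode))
 *        {
 *            *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
 *            pVCpu->iem.s.offOpcode = offOpcode + 2;
 *            return VINF_SUCCESS;
 *        }
 *        return iemOpcodeGetNextU16Slow(pVCpu, pu16); // refill via iemOpcodeFetchMoreBytes
 *    }
 */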
1720
1721
1722
1723/** @name Misc Worker Functions.
1724 * @{
1725 */
1726
1727/**
1728 * Gets the exception class for the specified exception vector.
1729 *
1730 * @returns The class of the specified exception.
1731 * @param uVector The exception vector.
1732 */
1733static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1734{
1735 Assert(uVector <= X86_XCPT_LAST);
1736 switch (uVector)
1737 {
1738 case X86_XCPT_DE:
1739 case X86_XCPT_TS:
1740 case X86_XCPT_NP:
1741 case X86_XCPT_SS:
1742 case X86_XCPT_GP:
1743 case X86_XCPT_SX: /* AMD only */
1744 return IEMXCPTCLASS_CONTRIBUTORY;
1745
1746 case X86_XCPT_PF:
1747 case X86_XCPT_VE: /* Intel only */
1748 return IEMXCPTCLASS_PAGE_FAULT;
1749
1750 case X86_XCPT_DF:
1751 return IEMXCPTCLASS_DOUBLE_FAULT;
1752 }
1753 return IEMXCPTCLASS_BENIGN;
1754}
1755
1756
1757/**
1758 * Evaluates how to handle an exception caused during delivery of another event
1759 * (exception / interrupt).
1760 *
1761 * @returns How to handle the recursive exception.
1762 * @param pVCpu The cross context virtual CPU structure of the
1763 * calling thread.
1764 * @param fPrevFlags The flags of the previous event.
1765 * @param uPrevVector The vector of the previous event.
1766 * @param fCurFlags The flags of the current exception.
1767 * @param uCurVector The vector of the current exception.
1768 * @param pfXcptRaiseInfo Where to store additional information about the
1769 * exception condition. Optional.
1770 */
1771VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1772 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1773{
1774 /*
1775 * Only CPU exceptions can be raised while delivering other events; software interrupt
1776 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1777 */
1778 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1779 Assert(pVCpu); RT_NOREF(pVCpu);
1780 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1781
1782 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1783 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1784 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1785 {
1786 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1787 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1788 {
1789 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1790 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1791 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1792 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1793 {
1794 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1795 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1796 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1797 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1798 uCurVector, pVCpu->cpum.GstCtx.cr2));
1799 }
1800 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1802 {
1803 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1805 }
1806 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1807 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1808 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1809 {
1810 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1811 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1812 }
1813 }
1814 else
1815 {
1816 if (uPrevVector == X86_XCPT_NMI)
1817 {
1818 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1819 if (uCurVector == X86_XCPT_PF)
1820 {
1821 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1822 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1823 }
1824 }
1825 else if ( uPrevVector == X86_XCPT_AC
1826 && uCurVector == X86_XCPT_AC)
1827 {
1828 enmRaise = IEMXCPTRAISE_CPU_HANG;
1829 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1830 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1831 }
1832 }
1833 }
1834 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1835 {
1836 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1837 if (uCurVector == X86_XCPT_PF)
1838 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1839 }
1840 else
1841 {
1842 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1843 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1844 }
1845
1846 if (pfXcptRaiseInfo)
1847 *pfXcptRaiseInfo = fRaiseInfo;
1848 return enmRaise;
1849}
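/*
 * Worked example of the classification above: a #GP (contributory) raised
 * while delivering a #PF (page-fault class) yields IEMXCPTRAISE_DOUBLE_FAULT
 * with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT; two contributory exceptions,
 * e.g. #NP while delivering #GP, likewise yield IEMXCPTRAISE_DOUBLE_FAULT;
 * whereas a #PF raised during NMI delivery (benign class) keeps
 * IEMXCPTRAISE_CURRENT_XCPT and only records IEMXCPTRAISEINFO_NMI_PF.
 */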
1850
1851
1852/**
1853 * Enters the CPU shutdown state initiated by a triple fault or other
1854 * unrecoverable conditions.
1855 *
1856 * @returns Strict VBox status code.
1857 * @param pVCpu The cross context virtual CPU structure of the
1858 * calling thread.
1859 */
1860static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1861{
1862 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1863 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1864
1865 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1866 {
1867 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1868 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1869 }
1870
1871 RT_NOREF(pVCpu);
1872 return VINF_EM_TRIPLE_FAULT;
1873}
1874
1875
1876/**
1877 * Validates a new SS segment.
1878 *
1879 * @returns VBox strict status code.
1880 * @param pVCpu The cross context virtual CPU structure of the
1881 * calling thread.
1882 * @param NewSS The new SS selector.
1883 * @param uCpl The CPL to load the stack for.
1884 * @param pDesc Where to return the descriptor.
1885 */
1886static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1887{
1888 /* Null selectors are not allowed (we're not called for dispatching
1889 interrupts with SS=0 in long mode). */
1890 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1893 return iemRaiseTaskSwitchFault0(pVCpu);
1894 }
1895
1896 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1897 if ((NewSS & X86_SEL_RPL) != uCpl)
1898 {
1899 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1900 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1901 }
1902
1903 /*
1904 * Read the descriptor.
1905 */
1906 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1907 if (rcStrict != VINF_SUCCESS)
1908 return rcStrict;
1909
1910 /*
1911 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1912 */
1913 if (!pDesc->Legacy.Gen.u1DescType)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918
1919 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1920 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1921 {
1922 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1923 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1924 }
1925 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1926 {
1927 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1928 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1929 }
1930
1931 /* Is it there? */
1932 /** @todo testcase: Is this checked before the canonical / limit check below? */
1933 if (!pDesc->Legacy.Gen.u1Present)
1934 {
1935 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1936 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1937 }
1938
1939 return VINF_SUCCESS;
1940}
1941
1942/** @} */
1943
1944
1945/** @name Raising Exceptions.
1946 *
1947 * @{
1948 */
1949
1950
1951/**
1952 * Loads the specified stack far pointer from the TSS.
1953 *
1954 * @returns VBox strict status code.
1955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1956 * @param uCpl The CPL to load the stack for.
1957 * @param pSelSS Where to return the new stack segment.
1958 * @param puEsp Where to return the new stack pointer.
1959 */
1960static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1961{
1962 VBOXSTRICTRC rcStrict;
1963 Assert(uCpl < 4);
1964
1965 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1966 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1967 {
1968 /*
1969 * 16-bit TSS (X86TSS16).
1970 */
1971 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1972 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1973 {
1974 uint32_t off = uCpl * 4 + 2;
1975 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1976 {
1977 /** @todo check actual access pattern here. */
1978 uint32_t u32Tmp = 0; /* gcc maybe... */
1979 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1980 if (rcStrict == VINF_SUCCESS)
1981 {
1982 *puEsp = RT_LOWORD(u32Tmp);
1983 *pSelSS = RT_HIWORD(u32Tmp);
1984 return VINF_SUCCESS;
1985 }
1986 }
1987 else
1988 {
1989 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1990 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1991 }
1992 break;
1993 }
1994
1995 /*
1996 * 32-bit TSS (X86TSS32).
1997 */
1998 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1999 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2000 {
2001 uint32_t off = uCpl * 8 + 4;
2002 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2003 {
2004/** @todo check actual access pattern here. */
2005 uint64_t u64Tmp;
2006 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2007 if (rcStrict == VINF_SUCCESS)
2008 {
2009 *puEsp = u64Tmp & UINT32_MAX;
2010 *pSelSS = (RTSEL)(u64Tmp >> 32);
2011 return VINF_SUCCESS;
2012 }
2013 }
2014 else
2015 {
2016 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2017 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2018 }
2019 break;
2020 }
2021
2022 default:
2023 AssertFailed();
2024 rcStrict = VERR_IEM_IPE_4;
2025 break;
2026 }
2027
2028 *puEsp = 0; /* make gcc happy */
2029 *pSelSS = 0; /* make gcc happy */
2030 return rcStrict;
2031}
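/*
 * Worked example for the offset math above (standard TSS layouts): with
 * uCpl=1 the 16-bit case reads 4 bytes at off = 1*4 + 2 = 6, i.e. sp1
 * followed by ss1, while the 32-bit case reads 8 bytes at off = 1*8 + 4 = 12,
 * i.e. esp1 (dword) followed by ss1 (word, the upper word being reserved).
 */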
2032
2033
2034/**
2035 * Loads the specified stack pointer from the 64-bit TSS.
2036 *
2037 * @returns VBox strict status code.
2038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2039 * @param uCpl The CPL to load the stack for.
2040 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2041 * @param puRsp Where to return the new stack pointer.
2042 */
2043static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2044{
2045 Assert(uCpl < 4);
2046 Assert(uIst < 8);
2047 *puRsp = 0; /* make gcc happy */
2048
2049 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2050 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2051
2052 uint32_t off;
2053 if (uIst)
2054 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2055 else
2056 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2057 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2058 {
2059 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2060 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2061 }
2062
2063 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2064}
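/*
 * Worked example for the 64-bit TSS lookup above (standard X86TSS64 layout):
 * uIst=0 with uCpl=2 reads the 8 bytes at RT_UOFFSETOF(X86TSS64, rsp0)
 * + 2*8 = 4 + 16 = 20, i.e. rsp2, whereas uIst=3 reads
 * RT_UOFFSETOF(X86TSS64, ist1) + 2*8 = 36 + 16 = 52, i.e. ist3.
 */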
2065
2066
2067/**
2068 * Adjust the CPU state according to the exception being raised.
2069 *
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param u8Vector The exception that has been raised.
2072 */
2073DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2074{
2075 switch (u8Vector)
2076 {
2077 case X86_XCPT_DB:
2078 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2079 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2080 break;
2081 /** @todo Read the AMD and Intel exception reference... */
2082 }
2083}
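/*
 * Example of the adjustment above: when a #DB is delivered, DR7.GD is cleared
 * so the debug exception handler can access the debug registers without
 * immediately raising another #DB (the architected behaviour of the
 * general-detect facility).
 */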
2084
2085
2086/**
2087 * Implements exceptions and interrupts for real mode.
2088 *
2089 * @returns VBox strict status code.
2090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2091 * @param cbInstr The number of bytes to offset rIP by in the return
2092 * address.
2093 * @param u8Vector The interrupt / exception vector number.
2094 * @param fFlags The flags.
2095 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2096 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2097 */
2098static VBOXSTRICTRC
2099iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2100 uint8_t cbInstr,
2101 uint8_t u8Vector,
2102 uint32_t fFlags,
2103 uint16_t uErr,
2104 uint64_t uCr2) RT_NOEXCEPT
2105{
2106 NOREF(uErr); NOREF(uCr2);
2107 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2108
2109 /*
2110 * Read the IDT entry.
2111 */
2112 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2113 {
2114 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2115 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2116 }
2117 RTFAR16 Idte;
2118 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2119 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2120 {
2121 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2122 return rcStrict;
2123 }
2124
2125#ifdef LOG_ENABLED
2126 /* If software interrupt, try decode it if logging is enabled and such. */
2127 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2128 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2129 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2130#endif
2131
2132 /*
2133 * Push the stack frame.
2134 */
2135 uint8_t bUnmapInfo;
2136 uint16_t *pu16Frame;
2137 uint64_t uNewRsp;
2138 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2139 if (rcStrict != VINF_SUCCESS)
2140 return rcStrict;
2141
2142 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2143#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2144 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2145 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2146 fEfl |= UINT16_C(0xf000);
2147#endif
2148 pu16Frame[2] = (uint16_t)fEfl;
2149 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2150 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2151 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2152 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2153 return rcStrict;
2154
2155 /*
2156 * Load the vector address into cs:ip and make exception specific state
2157 * adjustments.
2158 */
2159 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2160 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2161 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2162 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2163 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2164 pVCpu->cpum.GstCtx.rip = Idte.off;
2165 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2166 IEMMISC_SET_EFL(pVCpu, fEfl);
2167
2168 /** @todo do we actually do this in real mode? */
2169 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2170 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2171
2172 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2173 so best leave them alone in case we're in a weird kind of real mode... */
2174
2175 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2176}
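/*
 * Illustrative example of the real-mode path above: for INT 21h with the
 * default real-mode IDTR (base 0, limit 0x3FF) the 4-byte IVT entry is read
 * from linear address 0x21 * 4 = 0x84, a 6-byte frame of FLAGS, CS and the
 * return IP is pushed, and execution continues at Idte.sel:Idte.off with
 * IF, TF and AC cleared.
 */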
2177
2178
2179/**
2180 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2181 *
2182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2183 * @param pSReg Pointer to the segment register.
2184 */
2185DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2186{
2187 pSReg->Sel = 0;
2188 pSReg->ValidSel = 0;
2189 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2190 {
2191 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2192 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2193 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2194 }
2195 else
2196 {
2197 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2198 /** @todo check this on AMD-V */
2199 pSReg->u64Base = 0;
2200 pSReg->u32Limit = 0;
2201 }
2202}
2203
2204
2205/**
2206 * Loads a segment selector during a task switch in V8086 mode.
2207 *
2208 * @param pSReg Pointer to the segment register.
2209 * @param uSel The selector value to load.
2210 */
2211DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2212{
2213 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2214 pSReg->Sel = uSel;
2215 pSReg->ValidSel = uSel;
2216 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2217 pSReg->u64Base = uSel << 4;
2218 pSReg->u32Limit = 0xffff;
2219 pSReg->Attr.u = 0xf3;
2220}
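/*
 * Example: loading uSel=0x1234 here yields u64Base=0x12340, u32Limit=0xffff
 * and Attr=0xf3 (present, DPL=3, accessed read/write data), matching the
 * V8086 model where the selector is simply a paragraph number.
 */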
2221
2222
2223/**
2224 * Loads a segment selector during a task switch in protected mode.
2225 *
2226 * In this task switch scenario, we would throw \#TS exceptions rather than
2227 * \#GPs.
2228 *
2229 * @returns VBox strict status code.
2230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2231 * @param pSReg Pointer to the segment register.
2232 * @param uSel The new selector value.
2233 *
2234 * @remarks This does _not_ handle CS or SS.
2235 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2236 */
2237static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2238{
2239 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2240
2241 /* Null data selector. */
2242 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2243 {
2244 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2246 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2247 return VINF_SUCCESS;
2248 }
2249
2250 /* Fetch the descriptor. */
2251 IEMSELDESC Desc;
2252 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2253 if (rcStrict != VINF_SUCCESS)
2254 {
2255 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2256 VBOXSTRICTRC_VAL(rcStrict)));
2257 return rcStrict;
2258 }
2259
2260 /* Must be a data segment or readable code segment. */
2261 if ( !Desc.Legacy.Gen.u1DescType
2262 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2263 {
2264 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2265 Desc.Legacy.Gen.u4Type));
2266 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2267 }
2268
2269 /* Check privileges for data segments and non-conforming code segments. */
2270 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2271 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2272 {
2273 /* The RPL and the new CPL must be less than or equal to the DPL. */
2274 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2275 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2276 {
2277 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2278 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2279 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2280 }
2281 }
2282
2283 /* Is it there? */
2284 if (!Desc.Legacy.Gen.u1Present)
2285 {
2286 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2287 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2288 }
2289
2290 /* The base and limit. */
2291 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2292 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2293
2294 /*
2295 * Ok, everything checked out fine. Now set the accessed bit before
2296 * committing the result into the registers.
2297 */
2298 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2299 {
2300 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2301 if (rcStrict != VINF_SUCCESS)
2302 return rcStrict;
2303 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2304 }
2305
2306 /* Commit */
2307 pSReg->Sel = uSel;
2308 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2309 pSReg->u32Limit = cbLimit;
2310 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2311 pSReg->ValidSel = uSel;
2312 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2313 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2314 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2315
2316 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2318 return VINF_SUCCESS;
2319}
2320
2321
2322/**
2323 * Performs a task switch.
2324 *
2325 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2326 * caller is responsible for performing the necessary checks (like DPL, TSS
2327 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2328 * reference for JMP, CALL, IRET.
2329 *
2330 * If the task switch is due to a software interrupt or hardware exception,
2331 * the caller is responsible for validating the TSS selector and descriptor. See
2332 * Intel Instruction reference for INT n.
2333 *
2334 * @returns VBox strict status code.
2335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2336 * @param enmTaskSwitch The cause of the task switch.
2337 * @param uNextEip The EIP effective after the task switch.
2338 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2339 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2340 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2341 * @param SelTss The TSS selector of the new task.
2342 * @param pNewDescTss Pointer to the new TSS descriptor.
2343 */
2344VBOXSTRICTRC
2345iemTaskSwitch(PVMCPUCC pVCpu,
2346 IEMTASKSWITCH enmTaskSwitch,
2347 uint32_t uNextEip,
2348 uint32_t fFlags,
2349 uint16_t uErr,
2350 uint64_t uCr2,
2351 RTSEL SelTss,
2352 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2353{
2354 Assert(!IEM_IS_REAL_MODE(pVCpu));
2355 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2356 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2357
2358 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2359 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2360 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2361 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2362 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2363
2364 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2365 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2366
2367 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2368 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2369
2370 /* Update CR2 in case it's a page-fault. */
2371 /** @todo This should probably be done much earlier in IEM/PGM. See
2372 * @bugref{5653#c49}. */
2373 if (fFlags & IEM_XCPT_FLAGS_CR2)
2374 pVCpu->cpum.GstCtx.cr2 = uCr2;
2375
2376 /*
2377 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2378 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2379 */
2380 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2381 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2382 if (uNewTssLimit < uNewTssLimitMin)
2383 {
2384 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2385 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2386 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2387 }
2388
2389 /*
2390 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2391 * The new TSS must have been read and validated (DPL, limits etc.) before a
2392 * task-switch VM-exit commences.
2393 *
2394 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2395 */
2396 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2397 {
2398 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2399 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2400 }
2401
2402 /*
2403 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2404 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2405 */
2406 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2407 {
2408 uint64_t const uExitInfo1 = SelTss;
2409 uint64_t uExitInfo2 = uErr;
2410 switch (enmTaskSwitch)
2411 {
2412 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2413 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2414 default: break;
2415 }
2416 if (fFlags & IEM_XCPT_FLAGS_ERR)
2417 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2418 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2419 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2420
2421 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2422 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2423 RT_NOREF2(uExitInfo1, uExitInfo2);
2424 }
2425
2426 /*
2427 * Check the current TSS limit. The last field written to the current TSS during the
2428 * task switch is the 2 bytes at offset 0x5C (32-bit) or the 2 bytes at offset 0x28 (16-bit).
2429 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2430 *
2431 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2432 * end up with smaller than "legal" TSS limits.
2433 */
2434 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2435 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2436 if (uCurTssLimit < uCurTssLimitMin)
2437 {
2438 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2439 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2440 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2441 }
2442
2443 /*
2444 * Verify that the new TSS can be accessed and map it. Map only the required contents
2445 * and not the entire TSS.
2446 */
2447 uint8_t bUnmapInfoNewTss;
2448 void *pvNewTss;
2449 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2450 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2451 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2452 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2453 * not perform correct translation if this happens. See Intel spec. 7.2.1
2454 * "Task-State Segment". */
2455 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2456/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2457 * Consider wrapping the remainder into a function for simpler cleanup. */
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2461 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /*
2466 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2467 */
2468 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2469 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2470 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2471 {
2472 uint8_t bUnmapInfoDescCurTss;
2473 PX86DESC pDescCurTss;
2474 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2475 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2476 if (rcStrict != VINF_SUCCESS)
2477 {
2478 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2479 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2480 return rcStrict;
2481 }
2482
2483 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2484 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2485 if (rcStrict != VINF_SUCCESS)
2486 {
2487 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2488 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2489 return rcStrict;
2490 }
2491
2492 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2493 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2494 {
2495 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2496 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2497 fEFlags &= ~X86_EFL_NT;
2498 }
2499 }
2500
2501 /*
2502 * Save the CPU state into the current TSS.
2503 */
2504 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2505 if (GCPtrNewTss == GCPtrCurTss)
2506 {
2507 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2508 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2509 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2510 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2511 pVCpu->cpum.GstCtx.ldtr.Sel));
2512 }
2513 if (fIsNewTss386)
2514 {
2515 /*
2516 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2517 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2518 */
2519 uint8_t bUnmapInfoCurTss32;
2520 void *pvCurTss32;
2521 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2522 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2523 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2524 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2525 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2526 if (rcStrict != VINF_SUCCESS)
2527 {
2528 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2529 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2530 return rcStrict;
2531 }
2532
2533 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2534 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2535 pCurTss32->eip = uNextEip;
2536 pCurTss32->eflags = fEFlags;
2537 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2538 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2539 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2540 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2541 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2542 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2543 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2544 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2545 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2546 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2547 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2548 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2549 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2550 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2551
2552 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2553 if (rcStrict != VINF_SUCCESS)
2554 {
2555 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2556 VBOXSTRICTRC_VAL(rcStrict)));
2557 return rcStrict;
2558 }
2559 }
2560 else
2561 {
2562 /*
2563 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2564 */
2565 uint8_t bUnmapInfoCurTss16;
2566 void *pvCurTss16;
2567 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2568 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2569 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2570 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2571 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2575 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578
2579 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2580 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2581 pCurTss16->ip = uNextEip;
2582 pCurTss16->flags = (uint16_t)fEFlags;
2583 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2584 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2585 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2586 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2587 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2588 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2589 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2590 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2591 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2592 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2593 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2594 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2595
2596 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2597 if (rcStrict != VINF_SUCCESS)
2598 {
2599 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2600 VBOXSTRICTRC_VAL(rcStrict)));
2601 return rcStrict;
2602 }
2603 }
2604
2605 /*
2606 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2607 */
2608 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2609 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2610 {
2611 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2612 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2613 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2614 }
2615
2616 /*
2617 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2618 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2619 */
2620 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2621 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2622 bool fNewDebugTrap;
2623 if (fIsNewTss386)
2624 {
2625 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2626 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2627 uNewEip = pNewTss32->eip;
2628 uNewEflags = pNewTss32->eflags;
2629 uNewEax = pNewTss32->eax;
2630 uNewEcx = pNewTss32->ecx;
2631 uNewEdx = pNewTss32->edx;
2632 uNewEbx = pNewTss32->ebx;
2633 uNewEsp = pNewTss32->esp;
2634 uNewEbp = pNewTss32->ebp;
2635 uNewEsi = pNewTss32->esi;
2636 uNewEdi = pNewTss32->edi;
2637 uNewES = pNewTss32->es;
2638 uNewCS = pNewTss32->cs;
2639 uNewSS = pNewTss32->ss;
2640 uNewDS = pNewTss32->ds;
2641 uNewFS = pNewTss32->fs;
2642 uNewGS = pNewTss32->gs;
2643 uNewLdt = pNewTss32->selLdt;
2644 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2645 }
2646 else
2647 {
2648 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2649 uNewCr3 = 0;
2650 uNewEip = pNewTss16->ip;
2651 uNewEflags = pNewTss16->flags;
2652 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2653 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2654 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2655 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2656 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2657 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2658 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2659 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2660 uNewES = pNewTss16->es;
2661 uNewCS = pNewTss16->cs;
2662 uNewSS = pNewTss16->ss;
2663 uNewDS = pNewTss16->ds;
2664 uNewFS = 0;
2665 uNewGS = 0;
2666 uNewLdt = pNewTss16->selLdt;
2667 fNewDebugTrap = false;
2668 }
2669
2670 if (GCPtrNewTss == GCPtrCurTss)
2671 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2672 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2673
2674 /*
2675 * We're done accessing the new TSS.
2676 */
2677 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2678 if (rcStrict != VINF_SUCCESS)
2679 {
2680 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2681 return rcStrict;
2682 }
2683
2684 /*
2685 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2686 */
2687 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2688 {
2689 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2690 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2691 if (rcStrict != VINF_SUCCESS)
2692 {
2693 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2694 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2695 return rcStrict;
2696 }
2697
2698 /* Check that the descriptor indicates the new TSS is available (not busy). */
2699 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2700 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2701 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2702
2703 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2704 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2705 if (rcStrict != VINF_SUCCESS)
2706 {
2707 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2708 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2709 return rcStrict;
2710 }
2711 }
2712
2713 /*
2714 * From this point on, we're technically in the new task. We will defer exceptions
2715 * until the completion of the task switch but before executing any instructions in the new task.
2716 */
2717 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2718 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2719 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2720 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2721 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2722 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2723 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2724
2725 /* Set the busy bit in TR. */
2726 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2727
2728 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2729 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2730 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2731 {
2732 uNewEflags |= X86_EFL_NT;
2733 }
2734
2735 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2736 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2737 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2738
2739 pVCpu->cpum.GstCtx.eip = uNewEip;
2740 pVCpu->cpum.GstCtx.eax = uNewEax;
2741 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2742 pVCpu->cpum.GstCtx.edx = uNewEdx;
2743 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2744 pVCpu->cpum.GstCtx.esp = uNewEsp;
2745 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2746 pVCpu->cpum.GstCtx.esi = uNewEsi;
2747 pVCpu->cpum.GstCtx.edi = uNewEdi;
2748
2749 uNewEflags &= X86_EFL_LIVE_MASK;
2750 uNewEflags |= X86_EFL_RA1_MASK;
2751 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2752
2753 /*
2754 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2755 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2756 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2757 */
2758 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2759 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2760
2761 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2762 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2763
2764 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2765 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2766
2767 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2768 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2769
2770 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2771 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2772
2773 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2774 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2775 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2776
2777 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2778 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2779 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2780 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2781
2782 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2783 {
2784 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2785 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2786 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2787 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2788 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2789 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2790 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2791 }
2792
2793 /*
2794 * Switch CR3 for the new task.
2795 */
2796 if ( fIsNewTss386
2797 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2798 {
2799 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2800 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2801 AssertRCSuccessReturn(rc, rc);
2802
2803 /* Inform PGM. */
2804 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2805 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2806 AssertRCReturn(rc, rc);
2807 /* ignore informational status codes */
2808
2809 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2810 }
2811
2812 /*
2813 * Switch LDTR for the new task.
2814 */
2815 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2816 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2817 else
2818 {
2819 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2820
2821 IEMSELDESC DescNewLdt;
2822 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2823 if (rcStrict != VINF_SUCCESS)
2824 {
2825 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2826 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2827 return rcStrict;
2828 }
2829 if ( !DescNewLdt.Legacy.Gen.u1Present
2830 || DescNewLdt.Legacy.Gen.u1DescType
2831 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2832 {
2833 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2834 uNewLdt, DescNewLdt.Legacy.u));
2835 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2836 }
2837
2838 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2839 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2840 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2841 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2842 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2843 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2844 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2846 }
2847
2848 IEMSELDESC DescSS;
2849 if (IEM_IS_V86_MODE(pVCpu))
2850 {
2851 IEM_SET_CPL(pVCpu, 3);
2852 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2853 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2854 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2855 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2856 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2857 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2858
2859 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2860 DescSS.Legacy.u = 0;
2861 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2862 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2863 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2864 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2865 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2866 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2867 DescSS.Legacy.Gen.u2Dpl = 3;
2868 }
2869 else
2870 {
2871 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2872
2873 /*
2874 * Load the stack segment for the new task.
2875 */
2876 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2877 {
2878 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2879 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2880 }
2881
2882 /* Fetch the descriptor. */
2883 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2884 if (rcStrict != VINF_SUCCESS)
2885 {
2886 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2887 VBOXSTRICTRC_VAL(rcStrict)));
2888 return rcStrict;
2889 }
2890
2891 /* SS must be a data segment and writable. */
2892 if ( !DescSS.Legacy.Gen.u1DescType
2893 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2894 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2895 {
2896 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2897 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2898 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2899 }
2900
2901 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2902 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2903 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2904 {
2905 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2906 uNewCpl));
2907 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 /* Is it there? */
2911 if (!DescSS.Legacy.Gen.u1Present)
2912 {
2913 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2915 }
2916
2917 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2918 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2919
2920 /* Set the accessed bit before committing the result into SS. */
2921 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2922 {
2923 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2924 if (rcStrict != VINF_SUCCESS)
2925 return rcStrict;
2926 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2927 }
2928
2929 /* Commit SS. */
2930 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2931 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2932 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2933 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2934 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2935 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2936 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2937
2938 /* CPL has changed, update IEM before loading rest of segments. */
2939 IEM_SET_CPL(pVCpu, uNewCpl);
2940
2941 /*
2942 * Load the data segments for the new task.
2943 */
2944 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2945 if (rcStrict != VINF_SUCCESS)
2946 return rcStrict;
2947 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2948 if (rcStrict != VINF_SUCCESS)
2949 return rcStrict;
2950 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2951 if (rcStrict != VINF_SUCCESS)
2952 return rcStrict;
2953 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2954 if (rcStrict != VINF_SUCCESS)
2955 return rcStrict;
2956
2957 /*
2958 * Load the code segment for the new task.
2959 */
2960 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2961 {
2962 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* Fetch the descriptor. */
2967 IEMSELDESC DescCS;
2968 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2969 if (rcStrict != VINF_SUCCESS)
2970 {
2971 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2972 return rcStrict;
2973 }
2974
2975 /* CS must be a code segment. */
2976 if ( !DescCS.Legacy.Gen.u1DescType
2977 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2978 {
2979 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2980 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2981 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2982 }
2983
2984 /* For conforming CS, DPL must be less than or equal to the RPL. */
2985 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2986 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2987 {
2988 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2989 DescCS.Legacy.Gen.u2Dpl));
2990 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2991 }
2992
2993 /* For non-conforming CS, DPL must match RPL. */
2994 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2995 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2996 {
2997 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2998 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2999 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 /* Is it there? */
3003 if (!DescCS.Legacy.Gen.u1Present)
3004 {
3005 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3006 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3010 u64Base = X86DESC_BASE(&DescCS.Legacy);
3011
3012 /* Set the accessed bit before committing the result into CS. */
3013 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3014 {
3015 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3016 if (rcStrict != VINF_SUCCESS)
3017 return rcStrict;
3018 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3019 }
3020
3021 /* Commit CS. */
3022 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3023 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3024 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3025 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3026 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3027 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3029 }
3030
3031 /* Make sure the CPU mode is correct. */
3032 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3033 if (fExecNew != pVCpu->iem.s.fExec)
3034 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3035 pVCpu->iem.s.fExec = fExecNew;
3036
3037 /** @todo Debug trap. */
3038 if (fIsNewTss386 && fNewDebugTrap)
3039 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3040
3041 /*
3042 * Construct the error code masks based on what caused this task switch.
3043 * See Intel Instruction reference for INT.
3044 */
3045 uint16_t uExt;
3046 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3047 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3048 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3049 uExt = 1;
3050 else
3051 uExt = 0;
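    /* Note: uExt becomes the EXT bit (bit 0) of the error codes raised while finishing
       the switch (the #SS for the error-code push and the #GP for the CS limit check
       below): 1 for hardware interrupts/exceptions and ICEBP, 0 for software INTs and
       for JMP/CALL/IRET style task switches. */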
3052
3053 /*
3054 * Push any error code on to the new stack.
3055 */
3056 if (fFlags & IEM_XCPT_FLAGS_ERR)
3057 {
3058 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3059 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3060 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
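        /* Here cbStackFrame is just the size of the error code push: a dword on a
           32-bit TSS stack, a word on a 16-bit one.  It is only used for the limit
           checks below. */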
3061
3062 /* Check that there is sufficient space on the stack. */
3063 /** @todo Factor out segment limit checking for normal/expand down segments
3064 * into a separate function. */
3065 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3066 {
3067 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3068 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3069 {
3070 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3071 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3072 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3073 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3074 }
3075 }
3076 else
3077 {
3078 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3079 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3080 {
3081 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3082 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3083 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3084 }
3085 }
3086
3087
3088 if (fIsNewTss386)
3089 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3090 else
3091 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3092 if (rcStrict != VINF_SUCCESS)
3093 {
3094 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3095 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3096 return rcStrict;
3097 }
3098 }
3099
3100 /* Check the new EIP against the new CS limit. */
3101 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3102 {
3103        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3104             pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3105 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3106 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3107 }
3108
3109 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3110 pVCpu->cpum.GstCtx.ss.Sel));
3111 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3112}
3113
3114
3115/**
3116 * Implements exceptions and interrupts for protected mode.
3117 *
3118 * @returns VBox strict status code.
3119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3120 * @param cbInstr The number of bytes to offset rIP by in the return
3121 * address.
3122 * @param u8Vector The interrupt / exception vector number.
3123 * @param fFlags The flags.
3124 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3125 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3126 */
3127static VBOXSTRICTRC
3128iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3129 uint8_t cbInstr,
3130 uint8_t u8Vector,
3131 uint32_t fFlags,
3132 uint16_t uErr,
3133 uint64_t uCr2) RT_NOEXCEPT
3134{
3135 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3136
3137 /*
3138 * Read the IDT entry.
3139 */
3140 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3141 {
3142 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3143 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3144 }
3145 X86DESC Idte;
3146 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3147 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3148 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3149 {
3150 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3151 return rcStrict;
3152 }
3153 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3154 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3155 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3156 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3157
3158 /*
3159 * Check the descriptor type, DPL and such.
3160 * ASSUMES this is done in the same order as described for call-gate calls.
3161 */
3162 if (Idte.Gate.u1DescType)
3163 {
3164 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3165 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3166 }
3167 bool fTaskGate = false;
3168 uint8_t f32BitGate = true;
3169 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3170 switch (Idte.Gate.u4Type)
3171 {
3172 case X86_SEL_TYPE_SYS_UNDEFINED:
3173 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3174 case X86_SEL_TYPE_SYS_LDT:
3175 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3176 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3177 case X86_SEL_TYPE_SYS_UNDEFINED2:
3178 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3179 case X86_SEL_TYPE_SYS_UNDEFINED3:
3180 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3181 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3182 case X86_SEL_TYPE_SYS_UNDEFINED4:
3183 {
3184 /** @todo check what actually happens when the type is wrong...
3185 * esp. call gates. */
3186 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3187 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3188 }
3189
3190 case X86_SEL_TYPE_SYS_286_INT_GATE:
3191 f32BitGate = false;
3192 RT_FALL_THRU();
3193 case X86_SEL_TYPE_SYS_386_INT_GATE:
3194 fEflToClear |= X86_EFL_IF;
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_TASK_GATE:
3198 fTaskGate = true;
3199#ifndef IEM_IMPLEMENTS_TASKSWITCH
3200 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3201#endif
3202 break;
3203
3204 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3205 f32BitGate = false;
3206 break;
3207 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3208 break;
3209
3210 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3211 }
3212
3213 /* Check DPL against CPL if applicable. */
3214 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3215 {
3216 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3217 {
3218 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3219 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3220 }
3221 }
3222
3223 /* Is it there? */
3224 if (!Idte.Gate.u1Present)
3225 {
3226 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3227 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3228 }
3229
3230 /* Is it a task-gate? */
3231 if (fTaskGate)
3232 {
3233 /*
3234 * Construct the error code masks based on what caused this task switch.
3235 * See Intel Instruction reference for INT.
3236 */
3237 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3238 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3239 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3240 RTSEL SelTss = Idte.Gate.u16Sel;
3241
3242 /*
3243 * Fetch the TSS descriptor in the GDT.
3244 */
3245 IEMSELDESC DescTSS;
3246 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3247 if (rcStrict != VINF_SUCCESS)
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3250 VBOXSTRICTRC_VAL(rcStrict)));
3251 return rcStrict;
3252 }
3253
3254 /* The TSS descriptor must be a system segment and be available (not busy). */
3255 if ( DescTSS.Legacy.Gen.u1DescType
3256 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3257 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3258 {
3259 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3260 u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* The TSS must be present. */
3265 if (!DescTSS.Legacy.Gen.u1Present)
3266 {
3267 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3268 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3269 }
3270
3271 /* Do the actual task switch. */
3272 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3273 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3274 fFlags, uErr, uCr2, SelTss, &DescTSS);
3275 }
3276
3277 /* A null CS is bad. */
3278 RTSEL NewCS = Idte.Gate.u16Sel;
3279 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3280 {
3281 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3282 return iemRaiseGeneralProtectionFault0(pVCpu);
3283 }
3284
3285 /* Fetch the descriptor for the new CS. */
3286 IEMSELDESC DescCS;
3287 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3288 if (rcStrict != VINF_SUCCESS)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3291 return rcStrict;
3292 }
3293
3294 /* Must be a code segment. */
3295 if (!DescCS.Legacy.Gen.u1DescType)
3296 {
3297 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3298 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3299 }
3300 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3301 {
3302 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3303 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3304 }
3305
3306 /* Don't allow lowering the privilege level. */
3307 /** @todo Does the lowering of privileges apply to software interrupts
3308 * only? This has bearings on the more-privileged or
3309 * same-privilege stack behavior further down. A testcase would
3310 * be nice. */
3311 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3314 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3315 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3316 }
3317
3318 /* Make sure the selector is present. */
3319 if (!DescCS.Legacy.Gen.u1Present)
3320 {
3321 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3322 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3323 }
3324
3325#ifdef LOG_ENABLED
3326 /* If software interrupt, try decode it if logging is enabled and such. */
3327 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3328 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3329 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3330#endif
3331
3332 /* Check the new EIP against the new CS limit. */
3333 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3334 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3335 ? Idte.Gate.u16OffsetLow
3336 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
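    /* A 286 gate only holds a 16-bit offset; a 386 gate combines the low and high
       16-bit halves into a 32-bit entry point. */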
3337 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3338 if (uNewEip > cbLimitCS)
3339 {
3340 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3341 u8Vector, uNewEip, cbLimitCS, NewCS));
3342 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3343 }
3344 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3345
3346 /* Calc the flag image to push. */
3347 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3348 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3349 fEfl &= ~X86_EFL_RF;
3350 else
3351 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3352
3353    /* From V8086 mode we may only go to CPL 0. */
3354 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3355 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3356 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3357 {
3358 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3359 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3360 }
3361
3362 /*
3363 * If the privilege level changes, we need to get a new stack from the TSS.
3364     * This in turn means validating the new SS and ESP...
3365 */
3366 if (uNewCpl != IEM_GET_CPL(pVCpu))
3367 {
3368 RTSEL NewSS;
3369 uint32_t uNewEsp;
3370 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3371 if (rcStrict != VINF_SUCCESS)
3372 return rcStrict;
3373
3374 IEMSELDESC DescSS;
3375 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3376 if (rcStrict != VINF_SUCCESS)
3377 return rcStrict;
3378 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3379 if (!DescSS.Legacy.Gen.u1DefBig)
3380 {
3381 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3382 uNewEsp = (uint16_t)uNewEsp;
3383 }
3384
3385 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3386
3387 /* Check that there is sufficient space for the stack frame. */
3388 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3389 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3390 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3391 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
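        /* Frame size sanity: EIP, CS, EFLAGS, old ESP and old SS (plus an optional
           error code) make 5 or 6 entries; a V86 interrupt additionally saves ES, DS,
           FS and GS.  Entries are 2 bytes for a 286 gate and 4 bytes for a 386 gate,
           hence the shift by f32BitGate. */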
3392
3393 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3394 {
3395 if ( uNewEsp - 1 > cbLimitSS
3396 || uNewEsp < cbStackFrame)
3397 {
3398 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3399 u8Vector, NewSS, uNewEsp, cbStackFrame));
3400 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3401 }
3402 }
3403 else
3404 {
3405 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3406 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3407 {
3408 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3409 u8Vector, NewSS, uNewEsp, cbStackFrame));
3410 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3411 }
3412 }
3413
3414 /*
3415 * Start making changes.
3416 */
3417
3418 /* Set the new CPL so that stack accesses use it. */
3419 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3420 IEM_SET_CPL(pVCpu, uNewCpl);
3421
3422 /* Create the stack frame. */
3423 uint8_t bUnmapInfoStackFrame;
3424 RTPTRUNION uStackFrame;
3425 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3426 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3427 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 if (f32BitGate)
3431 {
3432 if (fFlags & IEM_XCPT_FLAGS_ERR)
3433 *uStackFrame.pu32++ = uErr;
3434 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3435 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3436 uStackFrame.pu32[2] = fEfl;
3437 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3438 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3439 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3440 if (fEfl & X86_EFL_VM)
3441 {
3442 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3443 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3444 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3445 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3446 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3447 }
3448 }
3449 else
3450 {
3451 if (fFlags & IEM_XCPT_FLAGS_ERR)
3452 *uStackFrame.pu16++ = uErr;
3453 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3454 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3455 uStackFrame.pu16[2] = fEfl;
3456 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3457 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3458 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3459 if (fEfl & X86_EFL_VM)
3460 {
3461 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3462 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3463 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3464 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3465 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3466 }
3467 }
3468 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3469 if (rcStrict != VINF_SUCCESS)
3470 return rcStrict;
3471
3472 /* Mark the selectors 'accessed' (hope this is the correct time). */
3473        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3474 * after pushing the stack frame? (Write protect the gdt + stack to
3475 * find out.) */
3476 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3477 {
3478 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3479 if (rcStrict != VINF_SUCCESS)
3480 return rcStrict;
3481 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3482 }
3483
3484 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3485 {
3486 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3490 }
3491
3492 /*
3493         * Start committing the register changes (joins with the DPL=CPL branch).
3494 */
3495 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3496 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3497 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3498 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3499 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3500 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3501 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3502 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3503 * SP is loaded).
3504 * Need to check the other combinations too:
3505 * - 16-bit TSS, 32-bit handler
3506 * - 32-bit TSS, 16-bit handler */
3507 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3508 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3509 else
3510 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3511
3512 if (fEfl & X86_EFL_VM)
3513 {
3514 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3515 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3516 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3517 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3518 }
3519 }
3520 /*
3521 * Same privilege, no stack change and smaller stack frame.
3522 */
3523 else
3524 {
3525 uint64_t uNewRsp;
3526 uint8_t bUnmapInfoStackFrame;
3527 RTPTRUNION uStackFrame;
3528 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
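        /* No stack switch here, so only EIP, CS and EFLAGS (plus an optional error
           code) are pushed: 3 or 4 entries of 2 or 4 bytes each. */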
3529 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3530 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3531 if (rcStrict != VINF_SUCCESS)
3532 return rcStrict;
3533
3534 if (f32BitGate)
3535 {
3536 if (fFlags & IEM_XCPT_FLAGS_ERR)
3537 *uStackFrame.pu32++ = uErr;
3538 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3539 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3540 uStackFrame.pu32[2] = fEfl;
3541 }
3542 else
3543 {
3544 if (fFlags & IEM_XCPT_FLAGS_ERR)
3545 *uStackFrame.pu16++ = uErr;
3546 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3547 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3548 uStackFrame.pu16[2] = fEfl;
3549 }
3550 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553
3554 /* Mark the CS selector as 'accessed'. */
3555 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3556 {
3557 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3558 if (rcStrict != VINF_SUCCESS)
3559 return rcStrict;
3560 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3561 }
3562
3563 /*
3564 * Start committing the register changes (joins with the other branch).
3565 */
3566 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3567 }
3568
3569 /* ... register committing continues. */
3570 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3571 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3572 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3573 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3574 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3575 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3576
3577 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3578 fEfl &= ~fEflToClear;
3579 IEMMISC_SET_EFL(pVCpu, fEfl);
3580
3581 if (fFlags & IEM_XCPT_FLAGS_CR2)
3582 pVCpu->cpum.GstCtx.cr2 = uCr2;
3583
3584 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3585 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3586
3587 /* Make sure the execution flags are correct. */
3588 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3589 if (fExecNew != pVCpu->iem.s.fExec)
3590 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3591 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3592 pVCpu->iem.s.fExec = fExecNew;
3593 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3594
3595 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3596}
3597
3598
3599/**
3600 * Implements exceptions and interrupts for long mode.
3601 *
3602 * @returns VBox strict status code.
3603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3604 * @param cbInstr The number of bytes to offset rIP by in the return
3605 * address.
3606 * @param u8Vector The interrupt / exception vector number.
3607 * @param fFlags The flags.
3608 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3609 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3610 */
3611static VBOXSTRICTRC
3612iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3613 uint8_t cbInstr,
3614 uint8_t u8Vector,
3615 uint32_t fFlags,
3616 uint16_t uErr,
3617 uint64_t uCr2) RT_NOEXCEPT
3618{
3619 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3620
3621 /*
3622 * Read the IDT entry.
3623 */
3624 uint16_t offIdt = (uint16_t)u8Vector << 4;
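    /* Long mode IDT entries are 16 bytes each, hence the shift by 4. */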
3625 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3626 {
3627 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3628 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3629 }
3630 X86DESC64 Idte;
3631#ifdef _MSC_VER /* Shut up silly compiler warning. */
3632 Idte.au64[0] = 0;
3633 Idte.au64[1] = 0;
3634#endif
3635 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3636 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3637 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3638 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3641 return rcStrict;
3642 }
3643 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3644 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3645 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3646
3647 /*
3648 * Check the descriptor type, DPL and such.
3649 * ASSUMES this is done in the same order as described for call-gate calls.
3650 */
3651 if (Idte.Gate.u1DescType)
3652 {
3653 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3654 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3655 }
3656 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3657 switch (Idte.Gate.u4Type)
3658 {
3659 case AMD64_SEL_TYPE_SYS_INT_GATE:
3660 fEflToClear |= X86_EFL_IF;
3661 break;
3662 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3663 break;
3664
3665 default:
3666 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3667 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3668 }
3669
3670 /* Check DPL against CPL if applicable. */
3671 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3672 {
3673 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3674 {
3675 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3676 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3677 }
3678 }
3679
3680 /* Is it there? */
3681 if (!Idte.Gate.u1Present)
3682 {
3683 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3684 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3685 }
3686
3687 /* A null CS is bad. */
3688 RTSEL NewCS = Idte.Gate.u16Sel;
3689 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3690 {
3691 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3692 return iemRaiseGeneralProtectionFault0(pVCpu);
3693 }
3694
3695 /* Fetch the descriptor for the new CS. */
3696 IEMSELDESC DescCS;
3697 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3698 if (rcStrict != VINF_SUCCESS)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3701 return rcStrict;
3702 }
3703
3704 /* Must be a 64-bit code segment. */
3705 if (!DescCS.Long.Gen.u1DescType)
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3708 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3709 }
3710 if ( !DescCS.Long.Gen.u1Long
3711 || DescCS.Long.Gen.u1DefBig
3712 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3713 {
3714 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3715 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3716 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3717 }
3718
3719 /* Don't allow lowering the privilege level. For non-conforming CS
3720 selectors, the CS.DPL sets the privilege level the trap/interrupt
3721 handler runs at. For conforming CS selectors, the CPL remains
3722 unchanged, but the CS.DPL must be <= CPL. */
3723 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3724 * when CPU in Ring-0. Result \#GP? */
3725 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3726 {
3727 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3728 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3729 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3730 }
3731
3732
3733 /* Make sure the selector is present. */
3734 if (!DescCS.Legacy.Gen.u1Present)
3735 {
3736 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3737 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3738 }
3739
3740 /* Check that the new RIP is canonical. */
3741 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3742 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3743 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
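    /* The 64-bit gate target is split over three fields: bits 15:0, 31:16 and 63:32. */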
3744 if (!IEM_IS_CANONICAL(uNewRip))
3745 {
3746 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3747 return iemRaiseGeneralProtectionFault0(pVCpu);
3748 }
3749
3750 /*
3751 * If the privilege level changes or if the IST isn't zero, we need to get
3752 * a new stack from the TSS.
3753 */
3754 uint64_t uNewRsp;
3755 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3756 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3757 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3758 || Idte.Gate.u3IST != 0)
3759 {
3760 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3761 if (rcStrict != VINF_SUCCESS)
3762 return rcStrict;
3763 }
3764 else
3765 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3766 uNewRsp &= ~(uint64_t)0xf;
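    /* In long mode the CPU aligns the new stack pointer on a 16 byte boundary
       before pushing the interrupt frame. */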
3767
3768 /*
3769 * Calc the flag image to push.
3770 */
3771 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3772 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3773 fEfl &= ~X86_EFL_RF;
3774 else
3775 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3776
3777 /*
3778 * Start making changes.
3779 */
3780 /* Set the new CPL so that stack accesses use it. */
3781 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3782 IEM_SET_CPL(pVCpu, uNewCpl);
3783/** @todo Setting CPL this early seems wrong as it would affect any errors we
3784 * raise accessing the stack and (?) GDT/LDT... */
3785
3786 /* Create the stack frame. */
3787 uint8_t bUnmapInfoStackFrame;
3788 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
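    /* The 64-bit frame always holds five quadwords (RIP, CS, RFLAGS, RSP and SS),
       plus an optional error code. */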
3789 RTPTRUNION uStackFrame;
3790 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3791 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3792 if (rcStrict != VINF_SUCCESS)
3793 return rcStrict;
3794
3795 if (fFlags & IEM_XCPT_FLAGS_ERR)
3796 *uStackFrame.pu64++ = uErr;
3797 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3798 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3799 uStackFrame.pu64[2] = fEfl;
3800 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3801 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3802 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3803 if (rcStrict != VINF_SUCCESS)
3804 return rcStrict;
3805
3806    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3807    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3808 * after pushing the stack frame? (Write protect the gdt + stack to
3809 * find out.) */
3810 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3811 {
3812 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3813 if (rcStrict != VINF_SUCCESS)
3814 return rcStrict;
3815 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3816 }
3817
3818 /*
3819     * Start committing the register changes.
3820 */
3821 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3822 * hidden registers when interrupting 32-bit or 16-bit code! */
3823 if (uNewCpl != uOldCpl)
3824 {
3825 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3826 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3827 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3828 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3829 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3830 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3831 }
3832 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3833 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3834 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3835 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3836 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3837 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3838 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3839 pVCpu->cpum.GstCtx.rip = uNewRip;
3840
3841 fEfl &= ~fEflToClear;
3842 IEMMISC_SET_EFL(pVCpu, fEfl);
3843
3844 if (fFlags & IEM_XCPT_FLAGS_CR2)
3845 pVCpu->cpum.GstCtx.cr2 = uCr2;
3846
3847 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3848 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3849
3850 iemRecalcExecModeAndCplFlags(pVCpu);
3851
3852 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3853}
3854
3855
3856/**
3857 * Implements exceptions and interrupts.
3858 *
3859 * All exceptions and interrupts go through this function!
3860 *
3861 * @returns VBox strict status code.
3862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3863 * @param cbInstr The number of bytes to offset rIP by in the return
3864 * address.
3865 * @param u8Vector The interrupt / exception vector number.
3866 * @param fFlags The flags.
3867 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3868 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3869 */
3870VBOXSTRICTRC
3871iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3872 uint8_t cbInstr,
3873 uint8_t u8Vector,
3874 uint32_t fFlags,
3875 uint16_t uErr,
3876 uint64_t uCr2) RT_NOEXCEPT
3877{
3878 /*
3879 * Get all the state that we might need here.
3880 */
3881 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3883
3884#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3885 /*
3886 * Flush prefetch buffer
3887 */
3888 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3889#endif
3890
3891 /*
3892 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3893 */
3894 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3895 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3896 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3897 | IEM_XCPT_FLAGS_BP_INSTR
3898 | IEM_XCPT_FLAGS_ICEBP_INSTR
3899 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3900 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3901 {
3902 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3903 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3904 u8Vector = X86_XCPT_GP;
3905 uErr = 0;
3906 }
3907
3908 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3909#ifdef DBGFTRACE_ENABLED
3910 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3911 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3912 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3913#endif
3914
3915 /*
3916 * Check if DBGF wants to intercept the exception.
3917 */
3918 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3919 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3920 { /* likely */ }
3921 else
3922 {
3923 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3924 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3925 if (rcStrict != VINF_SUCCESS)
3926 return rcStrict;
3927 }
3928
3929 /*
3930 * Evaluate whether NMI blocking should be in effect.
3931 * Normally, NMI blocking is in effect whenever we inject an NMI.
3932 */
3933 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3934 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3935
3936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3937 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3938 {
3939 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3940 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3941 return rcStrict0;
3942
3943 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3944 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3945 {
3946 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3947 fBlockNmi = false;
3948 }
3949 }
3950#endif
3951
3952#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3953 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3954 {
3955 /*
3956 * If the event is being injected as part of VMRUN, it isn't subject to event
3957 * intercepts in the nested-guest. However, secondary exceptions that occur
3958 * during injection of any event -are- subject to exception intercepts.
3959 *
3960 * See AMD spec. 15.20 "Event Injection".
3961 */
3962 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3963 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3964 else
3965 {
3966 /*
3967 * Check and handle if the event being raised is intercepted.
3968 */
3969 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3970 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3971 return rcStrict0;
3972 }
3973 }
3974#endif
3975
3976 /*
3977 * Set NMI blocking if necessary.
3978 */
3979 if (fBlockNmi)
3980 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3981
3982 /*
3983 * Do recursion accounting.
3984 */
3985 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3986 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3987 if (pVCpu->iem.s.cXcptRecursions == 0)
3988 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3989 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3990 else
3991 {
3992 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3993 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3994 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3995
3996 if (pVCpu->iem.s.cXcptRecursions >= 4)
3997 {
3998#ifdef DEBUG_bird
3999 AssertFailed();
4000#endif
4001 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4002 }
4003
4004 /*
4005 * Evaluate the sequence of recurring events.
4006 */
4007 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4008 NULL /* pXcptRaiseInfo */);
4009 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4010 { /* likely */ }
4011 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4012 {
4013 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4014 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4015 u8Vector = X86_XCPT_DF;
4016 uErr = 0;
4017#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4018 /* VMX nested-guest #DF intercept needs to be checked here. */
4019 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4020 {
4021 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4022 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4023 return rcStrict0;
4024 }
4025#endif
4026 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4027 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4028 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4029 }
4030 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4031 {
4032 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4033 return iemInitiateCpuShutdown(pVCpu);
4034 }
4035 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4036 {
4037 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4038 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4039 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4040 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4041 return VERR_EM_GUEST_CPU_HANG;
4042 }
4043 else
4044 {
4045 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4046 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4047 return VERR_IEM_IPE_9;
4048 }
4049
4050 /*
4051         * The 'EXT' bit is set when an exception occurs during delivery of an external
4052         * event (such as an interrupt or an earlier exception)[1]. The privileged software
4053         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4054         * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4055 *
4056 * [1] - Intel spec. 6.13 "Error Code"
4057 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4058 * [3] - Intel Instruction reference for INT n.
4059 */
4060 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4061 && (fFlags & IEM_XCPT_FLAGS_ERR)
4062 && u8Vector != X86_XCPT_PF
4063 && u8Vector != X86_XCPT_DF)
4064 {
4065 uErr |= X86_TRAP_ERR_EXTERNAL;
4066 }
4067 }
4068
4069 pVCpu->iem.s.cXcptRecursions++;
4070 pVCpu->iem.s.uCurXcpt = u8Vector;
4071 pVCpu->iem.s.fCurXcpt = fFlags;
4072 pVCpu->iem.s.uCurXcptErr = uErr;
4073 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4074
4075 /*
4076 * Extensive logging.
4077 */
4078#if defined(LOG_ENABLED) && defined(IN_RING3)
4079 if (LogIs3Enabled())
4080 {
4081 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4082 char szRegs[4096];
4083 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4084 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4085 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4086 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4087 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4088 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4089 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4090 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4091 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4092 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4093 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4094 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4095 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4096 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4097 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4098 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4099 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4100 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4101 " efer=%016VR{efer}\n"
4102 " pat=%016VR{pat}\n"
4103 " sf_mask=%016VR{sf_mask}\n"
4104 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4105 " lstar=%016VR{lstar}\n"
4106 " star=%016VR{star} cstar=%016VR{cstar}\n"
4107 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4108 );
4109
4110 char szInstr[256];
4111 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4112 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4113 szInstr, sizeof(szInstr), NULL);
4114 Log3(("%s%s\n", szRegs, szInstr));
4115 }
4116#endif /* LOG_ENABLED */
4117
4118 /*
4119 * Stats.
4120 */
4121 uint64_t const uTimestamp = ASMReadTSC();
4122 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4123 {
4124 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4125 EMHistoryAddExit(pVCpu,
4126 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4127 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4128 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4129 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4130 }
4131 else
4132 {
4133 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4134 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4135 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4136 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4137 if (fFlags & IEM_XCPT_FLAGS_ERR)
4138 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4139 if (fFlags & IEM_XCPT_FLAGS_CR2)
4140 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4141 }
4142
4143 /*
4144     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4145 * to ensure that a stale TLB or paging cache entry will only cause one
4146 * spurious #PF.
4147 */
4148 if ( u8Vector == X86_XCPT_PF
4149 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4150 IEMTlbInvalidatePage(pVCpu, uCr2);
4151
4152 /*
4153 * Call the mode specific worker function.
4154 */
4155 VBOXSTRICTRC rcStrict;
4156 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4157 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4158 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4159 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4160 else
4161 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4162
4163 /* Flush the prefetch buffer. */
4164 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4165
4166 /*
4167 * Unwind.
4168 */
4169 pVCpu->iem.s.cXcptRecursions--;
4170 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4171 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4172 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4173 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4174 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4175 return rcStrict;
4176}
4177
4178#ifdef IEM_WITH_SETJMP
4179/**
4180 * See iemRaiseXcptOrInt. Will not return.
4181 */
4182DECL_NO_RETURN(void)
4183iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4184 uint8_t cbInstr,
4185 uint8_t u8Vector,
4186 uint32_t fFlags,
4187 uint16_t uErr,
4188 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4189{
4190 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4191 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4192}
4193#endif
4194
4195
4196/** \#DE - 00. */
4197VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4198{
4199 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4200}
4201
4202
4203#ifdef IEM_WITH_SETJMP
4204/** \#DE - 00. */
4205DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4206{
4207 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4208}
4209#endif
4210
4211
4212/** \#DB - 01.
4213 * @note This automatically clears DR7.GD. */
4214VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4217 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4218 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4219}
4220
4221
4222/** \#BR - 05. */
4223VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4224{
4225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4226}
4227
4228
4229/** \#UD - 06. */
4230VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4233}
4234
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#UD - 06. */
4238DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4241}
4242#endif
4243
4244
4245/** \#NM - 07. */
4246VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4249}
4250
4251
4252#ifdef IEM_WITH_SETJMP
4253/** \#NM - 07. */
4254DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4255{
4256 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4257}
4258#endif
4259
4260
4261/** \#TS(err) - 0a. */
4262VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4263{
4264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4265}
4266
4267
4268/** \#TS(tr) - 0a. */
4269VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4270{
4271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4272 pVCpu->cpum.GstCtx.tr.Sel, 0);
4273}
4274
4275
4276/** \#TS(0) - 0a. */
4277VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4278{
4279 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4280 0, 0);
4281}
4282
4283
4284/** \#TS(err) - 0a. */
4285VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4286{
4287 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4288 uSel & X86_SEL_MASK_OFF_RPL, 0);
4289}
4290
4291
4292/** \#NP(err) - 0b. */
4293VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4294{
4295 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4296}
4297
4298
4299/** \#NP(sel) - 0b. */
4300VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4301{
4302 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4303 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4304 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4305 uSel & ~X86_SEL_RPL, 0);
4306}
4307
4308
4309/** \#SS(seg) - 0c. */
4310VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4311{
4312 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4313 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4314 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4315 uSel & ~X86_SEL_RPL, 0);
4316}
4317
4318
4319/** \#SS(err) - 0c. */
4320VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4321{
4322 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4323 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4325}
4326
4327
4328/** \#GP(n) - 0d. */
4329VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4330{
4331 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4332 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4333}
4334
4335
4336/** \#GP(0) - 0d. */
4337VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4338{
4339 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4340 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4341}
4342
4343#ifdef IEM_WITH_SETJMP
4344/** \#GP(0) - 0d. */
4345DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4346{
4347 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4348 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4349}
4350#endif
4351
4352
4353/** \#GP(sel) - 0d. */
4354VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4355{
4356 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4357 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4358 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4359 Sel & ~X86_SEL_RPL, 0);
4360}
4361
4362
4363/** \#GP(0) - 0d. */
4364VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4365{
4366 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4367 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4368}
4369
4370
4371/** \#GP(sel) - 0d. */
4372VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4373{
4374 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4375 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4376 NOREF(iSegReg); NOREF(fAccess);
4377 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4378 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4379}
4380
4381#ifdef IEM_WITH_SETJMP
4382/** \#GP(sel) - 0d, longjmp. */
4383DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4384{
4385 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4386 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4387 NOREF(iSegReg); NOREF(fAccess);
4388 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4389 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4390}
4391#endif
4392
4393/** \#GP(sel) - 0d. */
4394VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4395{
4396 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4397 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4398 NOREF(Sel);
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4400}
4401
4402#ifdef IEM_WITH_SETJMP
4403/** \#GP(sel) - 0d, longjmp. */
4404DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4405{
4406 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4407 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4408 NOREF(Sel);
4409 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4410}
4411#endif
4412
4413
4414/** \#GP(sel) - 0d. */
4415VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4416{
4417 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4418 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4419 NOREF(iSegReg); NOREF(fAccess);
4420 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4421}
4422
4423#ifdef IEM_WITH_SETJMP
4424/** \#GP(sel) - 0d, longjmp. */
4425DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4426{
4427 NOREF(iSegReg); NOREF(fAccess);
4428 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4429}
4430#endif
4431
4432
4433/** \#PF(n) - 0e. */
4434VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4435{
4436 uint16_t uErr;
4437 switch (rc)
4438 {
4439 case VERR_PAGE_NOT_PRESENT:
4440 case VERR_PAGE_TABLE_NOT_PRESENT:
4441 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4442 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4443 uErr = 0;
4444 break;
4445
4446 default:
4447 AssertMsgFailed(("%Rrc\n", rc));
4448 RT_FALL_THRU();
4449 case VERR_ACCESS_DENIED:
4450 uErr = X86_TRAP_PF_P;
4451 break;
4452
4453 /** @todo reserved */
4454 }
4455
4456 if (IEM_GET_CPL(pVCpu) == 3)
4457 uErr |= X86_TRAP_PF_US;
4458
4459 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4460 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4461 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4462 uErr |= X86_TRAP_PF_ID;
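    /* At this point uErr carries the P bit (page-level protection violation vs. not present),
       US (CPL 3 access) and ID (instruction fetch with NX enabled) as applicable; the RW bit
       for writes is added below. */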
4463
4464#if 0 /* This is so much nonsense, really. Why was it done like that? */
4465    /* Note! RW access callers reporting a WRITE protection fault will clear
4466 the READ flag before calling. So, read-modify-write accesses (RW)
4467 can safely be reported as READ faults. */
4468 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4469 uErr |= X86_TRAP_PF_RW;
4470#else
4471 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4472 {
4473 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4474 /// (regardless of outcome of the comparison in the latter case).
4475 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4476 uErr |= X86_TRAP_PF_RW;
4477 }
4478#endif
4479
4480 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4481 of the memory operand rather than at the start of it. (Not sure what
4482       happens if it crosses a page boundary.) The current heuristic for
4483       this is to report the #PF for the last byte if the access is more than
4484 64 bytes. This is probably not correct, but we can work that out later,
4485 main objective now is to get FXSAVE to work like for real hardware and
4486 make bs3-cpu-basic2 work. */
4487 if (cbAccess <= 64)
4488    { /* likely */ }
4489 else
4490 GCPtrWhere += cbAccess - 1;
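        /* E.g. a 512-byte FXSAVE image starting at X that faults would report CR2 = X + 511. */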
4491
4492 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4493 uErr, GCPtrWhere);
4494}
4495
4496#ifdef IEM_WITH_SETJMP
4497/** \#PF(n) - 0e, longjmp. */
4498DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4499 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4500{
4501 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4502}
4503#endif
4504
4505
4506/** \#MF(0) - 10. */
4507VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4508{
4509 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4510 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4511
4512 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4513 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4514 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4515}
4516
4517#ifdef IEM_WITH_SETJMP
4518/** \#MF(0) - 10, longjmp. */
4519DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4520{
4521 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4522}
4523#endif
4524
4525
4526/** \#AC(0) - 11. */
4527VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4528{
4529 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4530}
4531
4532#ifdef IEM_WITH_SETJMP
4533/** \#AC(0) - 11, longjmp. */
4534DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4535{
4536 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4537}
4538#endif
4539
4540
4541/** \#XF(0)/\#XM(0) - 19. */
4542VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4543{
4544 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4545}
4546
4547
4548#ifdef IEM_WITH_SETJMP
4549/** \#XF(0)/\#XM(0) - 19, longjmp. */
4550DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4551{
4552 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4553}
4554#endif
4555
4556
4557/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4558IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4559{
4560 NOREF(cbInstr);
4561 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4562}
4563
4564
4565/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4566IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4567{
4568 NOREF(cbInstr);
4569 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4570}
4571
4572
4573/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4574IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4575{
4576 NOREF(cbInstr);
4577 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4578}
4579
4580
4581/** @} */
4582
4583/** @name Common opcode decoders.
4584 * @{
4585 */
4586//#include <iprt/mem.h>
4587
4588/**
4589 * Used to add extra details about a stub case.
4590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4591 */
4592void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4593{
4594#if defined(LOG_ENABLED) && defined(IN_RING3)
4595 PVM pVM = pVCpu->CTX_SUFF(pVM);
4596 char szRegs[4096];
4597 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4598 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4599 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4600 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4601 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4602 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4603 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4604 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4605 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4606 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4607 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4608 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4609 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4610 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4611 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4612 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4613 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4614 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4615 " efer=%016VR{efer}\n"
4616 " pat=%016VR{pat}\n"
4617 " sf_mask=%016VR{sf_mask}\n"
4618 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4619 " lstar=%016VR{lstar}\n"
4620 " star=%016VR{star} cstar=%016VR{cstar}\n"
4621 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4622 );
4623
4624 char szInstr[256];
4625 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4626 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4627 szInstr, sizeof(szInstr), NULL);
4628
4629 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4630#else
4631    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4632#endif
4633}
4634
4635/** @} */
4636
4637
4638
4639/** @name Register Access.
4640 * @{
4641 */
4642
4643/**
4644 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4645 *
4646 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4647 * segment limit.
4648 *
4649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4650 * @param cbInstr Instruction size.
4651 * @param offNextInstr The offset of the next instruction.
4652 * @param enmEffOpSize Effective operand size.
4653 */
4654VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4655 IEMMODE enmEffOpSize) RT_NOEXCEPT
4656{
4657 switch (enmEffOpSize)
4658 {
4659 case IEMMODE_16BIT:
4660 {
4661 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
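            /* E.g. a 2-byte jmp rel8 at ip=0x1000 with rel8=-4 yields uNewIp=0x0ffe. */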
4662 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4663 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4664 pVCpu->cpum.GstCtx.rip = uNewIp;
4665 else
4666 return iemRaiseGeneralProtectionFault0(pVCpu);
4667 break;
4668 }
4669
4670 case IEMMODE_32BIT:
4671 {
4672 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4673 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4674
4675 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4676 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4677 pVCpu->cpum.GstCtx.rip = uNewEip;
4678 else
4679 return iemRaiseGeneralProtectionFault0(pVCpu);
4680 break;
4681 }
4682
4683 case IEMMODE_64BIT:
4684 {
4685 Assert(IEM_IS_64BIT_CODE(pVCpu));
4686
4687 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4688 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4689 pVCpu->cpum.GstCtx.rip = uNewRip;
4690 else
4691 return iemRaiseGeneralProtectionFault0(pVCpu);
4692 break;
4693 }
4694
4695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4696 }
4697
4698#ifndef IEM_WITH_CODE_TLB
4699 /* Flush the prefetch buffer. */
4700 pVCpu->iem.s.cbOpcode = cbInstr;
4701#endif
4702
4703 /*
4704 * Clear RF and finish the instruction (maybe raise #DB).
4705 */
4706 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4707}
4708
4709
4710/**
4711 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4712 *
4713 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4714 * segment limit.
4715 *
4716 * @returns Strict VBox status code.
4717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4718 * @param cbInstr Instruction size.
4719 * @param offNextInstr The offset of the next instruction.
4720 */
4721VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4722{
4723 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4724
4725 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4726 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4727 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4728 pVCpu->cpum.GstCtx.rip = uNewIp;
4729 else
4730 return iemRaiseGeneralProtectionFault0(pVCpu);
4731
4732#ifndef IEM_WITH_CODE_TLB
4733 /* Flush the prefetch buffer. */
4734 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4735#endif
4736
4737 /*
4738 * Clear RF and finish the instruction (maybe raise #DB).
4739 */
4740 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4741}
4742
4743
4744/**
4745 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4746 *
4747 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4748 * segment limit.
4749 *
4750 * @returns Strict VBox status code.
4751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4752 * @param cbInstr Instruction size.
4753 * @param offNextInstr The offset of the next instruction.
4754 * @param enmEffOpSize Effective operand size.
4755 */
4756VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4757 IEMMODE enmEffOpSize) RT_NOEXCEPT
4758{
4759 if (enmEffOpSize == IEMMODE_32BIT)
4760 {
4761 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4762
4763 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4764 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4765 pVCpu->cpum.GstCtx.rip = uNewEip;
4766 else
4767 return iemRaiseGeneralProtectionFault0(pVCpu);
4768 }
4769 else
4770 {
4771 Assert(enmEffOpSize == IEMMODE_64BIT);
4772
4773 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4774 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4775 pVCpu->cpum.GstCtx.rip = uNewRip;
4776 else
4777 return iemRaiseGeneralProtectionFault0(pVCpu);
4778 }
4779
4780#ifndef IEM_WITH_CODE_TLB
4781 /* Flush the prefetch buffer. */
4782 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4783#endif
4784
4785 /*
4786 * Clear RF and finish the instruction (maybe raise #DB).
4787 */
4788 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4789}
4790
4791/** @} */
4792
4793
4794/** @name FPU access and helpers.
4795 *
4796 * @{
4797 */
4798
4799/**
4800 * Updates the x87.DS and FPUDP registers.
4801 *
4802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4803 * @param pFpuCtx The FPU context.
4804 * @param iEffSeg The effective segment register.
4805 * @param GCPtrEff The effective address relative to @a iEffSeg.
4806 */
4807DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4808{
4809 RTSEL sel;
4810 switch (iEffSeg)
4811 {
4812 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4813 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4814 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4815 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4816 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4817 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4818 default:
4819 AssertMsgFailed(("%d\n", iEffSeg));
4820 sel = pVCpu->cpum.GstCtx.ds.Sel;
4821 }
4822    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4823 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4824 {
4825 pFpuCtx->DS = 0;
4826 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
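        /* In real and V8086 mode FPUDP thus holds the linear address, i.e. selector * 16 + offset. */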
4827 }
4828 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4829 {
4830 pFpuCtx->DS = sel;
4831 pFpuCtx->FPUDP = GCPtrEff;
4832 }
4833 else
4834 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4835}
4836
4837
4838/**
4839 * Rotates the stack registers in the push direction.
4840 *
4841 * @param pFpuCtx The FPU context.
4842 * @remarks This is a complete waste of time, but fxsave stores the registers in
4843 * stack order.
4844 */
4845DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4846{
4847 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4848 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4849 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4850 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4851 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4852 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4853 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4854 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4855 pFpuCtx->aRegs[0].r80 = r80Tmp;
4856}
4857
4858
4859/**
4860 * Rotates the stack registers in the pop direction.
4861 *
4862 * @param pFpuCtx The FPU context.
4863 * @remarks This is a complete waste of time, but fxsave stores the registers in
4864 * stack order.
4865 */
4866DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4867{
4868 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4869 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4870 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4871 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4872 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4873 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4874 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4875 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4876 pFpuCtx->aRegs[7].r80 = r80Tmp;
4877}
4878
4879
4880/**
4881 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4882 * exception prevents it.
4883 *
4884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4885 * @param pResult The FPU operation result to push.
4886 * @param pFpuCtx The FPU context.
4887 */
4888static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4889{
4890 /* Update FSW and bail if there are pending exceptions afterwards. */
4891 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4892 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4893 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4894 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4895 {
4896        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4897 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4898 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4899 pFpuCtx->FSW = fFsw;
4900 return;
4901 }
4902
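    /* TOP - 1 modulo 8, i.e. the physical register slot a push would land in. */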
4903 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4904 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4905 {
4906 /* All is fine, push the actual value. */
4907 pFpuCtx->FTW |= RT_BIT(iNewTop);
4908 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4909 }
4910 else if (pFpuCtx->FCW & X86_FCW_IM)
4911 {
4912 /* Masked stack overflow, push QNaN. */
4913 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4914 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4915 }
4916 else
4917 {
4918 /* Raise stack overflow, don't push anything. */
4919 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4920 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4921 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4922 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4923 return;
4924 }
4925
4926 fFsw &= ~X86_FSW_TOP_MASK;
4927 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4928 pFpuCtx->FSW = fFsw;
4929
4930 iemFpuRotateStackPush(pFpuCtx);
4931 RT_NOREF(pVCpu);
4932}
4933
4934
4935/**
4936 * Stores a result in a FPU register and updates the FSW and FTW.
4937 *
4938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4939 * @param pFpuCtx The FPU context.
4940 * @param pResult The result to store.
4941 * @param iStReg Which FPU register to store it in.
4942 */
4943static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4944{
4945 Assert(iStReg < 8);
4946 uint16_t fNewFsw = pFpuCtx->FSW;
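    /* Map ST(iStReg) to its physical register index: (TOP + iStReg) modulo 8. */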
4947 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4948 fNewFsw &= ~X86_FSW_C_MASK;
4949 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4950 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4951 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4952 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4953 pFpuCtx->FSW = fNewFsw;
4954 pFpuCtx->FTW |= RT_BIT(iReg);
4955 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4956 RT_NOREF(pVCpu);
4957}
4958
4959
4960/**
4961 * Only updates the FPU status word (FSW) with the result of the current
4962 * instruction.
4963 *
4964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4965 * @param pFpuCtx The FPU context.
4966 * @param u16FSW The FSW output of the current instruction.
4967 */
4968static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4969{
4970 uint16_t fNewFsw = pFpuCtx->FSW;
4971 fNewFsw &= ~X86_FSW_C_MASK;
4972 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4973 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4974        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4975 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4976 pFpuCtx->FSW = fNewFsw;
4977 RT_NOREF(pVCpu);
4978}
4979
4980
4981/**
4982 * Pops one item off the FPU stack if no pending exception prevents it.
4983 *
4984 * @param pFpuCtx The FPU context.
4985 */
4986static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4987{
4988 /* Check pending exceptions. */
4989 uint16_t uFSW = pFpuCtx->FSW;
4990 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4991 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4992 return;
4993
4994    /* TOP++ - the x87 pop increments TOP (adding 9 below is +1 modulo 8). */
4995 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4996 uFSW &= ~X86_FSW_TOP_MASK;
4997 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4998 pFpuCtx->FSW = uFSW;
4999
5000 /* Mark the previous ST0 as empty. */
5001 iOldTop >>= X86_FSW_TOP_SHIFT;
5002 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5003
5004 /* Rotate the registers. */
5005 iemFpuRotateStackPop(pFpuCtx);
5006}
5007
5008
5009/**
5010 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5011 *
5012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5013 * @param pResult The FPU operation result to push.
5014 * @param uFpuOpcode The FPU opcode value.
5015 */
5016void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5017{
5018 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5019 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5020 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5021}
5022
5023
5024/**
5025 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5026 * and sets FPUDP and FPUDS.
5027 *
5028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5029 * @param pResult The FPU operation result to push.
5030 * @param iEffSeg The effective segment register.
5031 * @param GCPtrEff The effective address relative to @a iEffSeg.
5032 * @param uFpuOpcode The FPU opcode value.
5033 */
5034void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5035 uint16_t uFpuOpcode) RT_NOEXCEPT
5036{
5037 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5038 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5039 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5040 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5041}
5042
5043
5044/**
5045 * Replaces ST0 with the first value and pushes the second onto the FPU
5046 * stack, unless a pending exception prevents it.
5047 *
5048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5049 * @param pResult The FPU operation result to store and push.
5050 * @param uFpuOpcode The FPU opcode value.
5051 */
5052void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5053{
5054 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5055 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5056
5057 /* Update FSW and bail if there are pending exceptions afterwards. */
5058 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5059 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5060 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5061 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5062 {
5063 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5064 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5065 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5066 pFpuCtx->FSW = fFsw;
5067 return;
5068 }
5069
5070 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5071 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5072 {
5073 /* All is fine, push the actual value. */
5074 pFpuCtx->FTW |= RT_BIT(iNewTop);
5075 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5076 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5077 }
5078 else if (pFpuCtx->FCW & X86_FCW_IM)
5079 {
5080 /* Masked stack overflow, push QNaN. */
5081 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5082 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5083 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5084 }
5085 else
5086 {
5087 /* Raise stack overflow, don't push anything. */
5088 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5089 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5090 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5091 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5092 return;
5093 }
5094
5095 fFsw &= ~X86_FSW_TOP_MASK;
5096 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5097 pFpuCtx->FSW = fFsw;
5098
5099 iemFpuRotateStackPush(pFpuCtx);
5100}
5101
5102
5103/**
5104 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5105 * FOP.
5106 *
5107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5108 * @param pResult The result to store.
5109 * @param iStReg Which FPU register to store it in.
5110 * @param uFpuOpcode The FPU opcode value.
5111 */
5112void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5113{
5114 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5115 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5116 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5117}
5118
5119
5120/**
5121 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5122 * FOP, and then pops the stack.
5123 *
5124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5125 * @param pResult The result to store.
5126 * @param iStReg Which FPU register to store it in.
5127 * @param uFpuOpcode The FPU opcode value.
5128 */
5129void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5130{
5131 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5132 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5133 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5134 iemFpuMaybePopOne(pFpuCtx);
5135}
5136
5137
5138/**
5139 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5140 * FPUDP, and FPUDS.
5141 *
5142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5143 * @param pResult The result to store.
5144 * @param iStReg Which FPU register to store it in.
5145 * @param iEffSeg The effective memory operand selector register.
5146 * @param GCPtrEff The effective memory operand offset.
5147 * @param uFpuOpcode The FPU opcode value.
5148 */
5149void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5150 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5151{
5152 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5153 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5156}
5157
5158
5159/**
5160 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5161 * FPUDP, and FPUDS, and then pops the stack.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param pResult The result to store.
5165 * @param iStReg Which FPU register to store it in.
5166 * @param iEffSeg The effective memory operand selector register.
5167 * @param GCPtrEff The effective memory operand offset.
5168 * @param uFpuOpcode The FPU opcode value.
5169 */
5170void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5171 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5172{
5173 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5174 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5175 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5176 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5177 iemFpuMaybePopOne(pFpuCtx);
5178}
5179
5180
5181/**
5182 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5183 *
5184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5185 * @param uFpuOpcode The FPU opcode value.
5186 */
5187void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5188{
5189 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5190 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5191}
5192
5193
5194/**
5195 * Updates the FSW, FOP, FPUIP, and FPUCS.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param u16FSW The FSW from the current instruction.
5199 * @param uFpuOpcode The FPU opcode value.
5200 */
5201void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5202{
5203 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5204 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5205 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5206}
5207
5208
5209/**
5210 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5211 *
5212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5213 * @param u16FSW The FSW from the current instruction.
5214 * @param uFpuOpcode The FPU opcode value.
5215 */
5216void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5217{
5218 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5219 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5220 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5221 iemFpuMaybePopOne(pFpuCtx);
5222}
5223
5224
5225/**
5226 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5227 *
5228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5229 * @param u16FSW The FSW from the current instruction.
5230 * @param iEffSeg The effective memory operand selector register.
5231 * @param GCPtrEff The effective memory operand offset.
5232 * @param uFpuOpcode The FPU opcode value.
5233 */
5234void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5235{
5236 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5237 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5238 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5239 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5240}
5241
5242
5243/**
5244 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5245 *
5246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5247 * @param u16FSW The FSW from the current instruction.
5248 * @param uFpuOpcode The FPU opcode value.
5249 */
5250void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5251{
5252 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5253 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5254 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5255 iemFpuMaybePopOne(pFpuCtx);
5256 iemFpuMaybePopOne(pFpuCtx);
5257}
5258
5259
5260/**
5261 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5262 *
5263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5264 * @param u16FSW The FSW from the current instruction.
5265 * @param iEffSeg The effective memory operand selector register.
5266 * @param GCPtrEff The effective memory operand offset.
5267 * @param uFpuOpcode The FPU opcode value.
5268 */
5269void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5270 uint16_t uFpuOpcode) RT_NOEXCEPT
5271{
5272 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5273 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5274 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5275 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5276 iemFpuMaybePopOne(pFpuCtx);
5277}
5278
5279
5280/**
5281 * Worker routine for raising an FPU stack underflow exception.
5282 *
5283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5284 * @param pFpuCtx The FPU context.
5285 * @param iStReg The stack register being accessed.
5286 */
5287static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5288{
5289 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5290 if (pFpuCtx->FCW & X86_FCW_IM)
5291 {
5292 /* Masked underflow. */
5293 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5294 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5295 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5296 if (iStReg != UINT8_MAX)
5297 {
5298 pFpuCtx->FTW |= RT_BIT(iReg);
5299 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5300 }
5301 }
5302 else
5303 {
5304 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5305 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5306 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5308 }
5309 RT_NOREF(pVCpu);
5310}
5311
5312
5313/**
5314 * Raises a FPU stack underflow exception.
5315 *
5316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5317 * @param iStReg The destination register that should be loaded
5318 * with QNaN if \#IS is not masked. Specify
5319 * UINT8_MAX if none (like for fcom).
5320 * @param uFpuOpcode The FPU opcode value.
5321 */
5322void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5323{
5324 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5325 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5326 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5327}
5328
5329
5330void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5331{
5332 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5333 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5336}
5337
5338
5339void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5340{
5341 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5342 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5343 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5344 iemFpuMaybePopOne(pFpuCtx);
5345}
5346
5347
5348void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5349 uint16_t uFpuOpcode) RT_NOEXCEPT
5350{
5351 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5352 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5353 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5354 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5355 iemFpuMaybePopOne(pFpuCtx);
5356}
5357
5358
5359void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5360{
5361 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5362 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5363 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5364 iemFpuMaybePopOne(pFpuCtx);
5365 iemFpuMaybePopOne(pFpuCtx);
5366}
5367
5368
5369void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5370{
5371 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5372 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5373
5374 if (pFpuCtx->FCW & X86_FCW_IM)
5375 {
5376        /* Masked underflow - Push QNaN. */
5377 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5378 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5379 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5380 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5381 pFpuCtx->FTW |= RT_BIT(iNewTop);
5382 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5383 iemFpuRotateStackPush(pFpuCtx);
5384 }
5385 else
5386 {
5387 /* Exception pending - don't change TOP or the register stack. */
5388 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5389 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5390 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5391 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5392 }
5393}
5394
5395
5396void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5397{
5398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5399 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5400
5401 if (pFpuCtx->FCW & X86_FCW_IM)
5402 {
5403        /* Masked underflow - Push QNaN. */
5404 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5405 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5406 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5407 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5408 pFpuCtx->FTW |= RT_BIT(iNewTop);
5409 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5410 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5411 iemFpuRotateStackPush(pFpuCtx);
5412 }
5413 else
5414 {
5415 /* Exception pending - don't change TOP or the register stack. */
5416 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5417 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5418 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5419 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5420 }
5421}
5422
5423
5424/**
5425 * Worker routine for raising an FPU stack overflow exception on a push.
5426 *
5427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5428 * @param pFpuCtx The FPU context.
5429 */
5430static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5431{
5432 if (pFpuCtx->FCW & X86_FCW_IM)
5433 {
5434 /* Masked overflow. */
5435 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5436 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5437 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5438 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5439 pFpuCtx->FTW |= RT_BIT(iNewTop);
5440 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5441 iemFpuRotateStackPush(pFpuCtx);
5442 }
5443 else
5444 {
5445 /* Exception pending - don't change TOP or the register stack. */
5446 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5447 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5448 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5449 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5450 }
5451 RT_NOREF(pVCpu);
5452}
5453
5454
5455/**
5456 * Raises a FPU stack overflow exception on a push.
5457 *
5458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5459 * @param uFpuOpcode The FPU opcode value.
5460 */
5461void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5462{
5463 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5464 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5465 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5466}
5467
5468
5469/**
5470 * Raises a FPU stack overflow exception on a push with a memory operand.
5471 *
5472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5473 * @param iEffSeg The effective memory operand selector register.
5474 * @param GCPtrEff The effective memory operand offset.
5475 * @param uFpuOpcode The FPU opcode value.
5476 */
5477void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5478{
5479 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5480 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5481 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5482 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5483}
5484
5485/** @} */
5486
5487
5488/** @name SSE+AVX SIMD access and helpers.
5489 *
5490 * @{
5491 */
5492/**
5493 * Stores a result in a SIMD XMM register, updates the MXCSR.
5494 *
5495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5496 * @param pResult The result to store.
5497 * @param iXmmReg Which SIMD XMM register to store the result in.
5498 */
5499void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5500{
5501 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5502 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5503
5504 /* The result is only updated if there is no unmasked exception pending. */
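    /* The shift lines the MXCSR exception mask bits (7..12) up with the corresponding
       exception flag bits (0..5), so the store only happens when every raised flag is masked. */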
5505 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5506 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5507 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5508}
5509
5510
5511/**
5512 * Updates the MXCSR.
5513 *
5514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5515 * @param fMxcsr The new MXCSR value.
5516 */
5517void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5518{
5519 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5520 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5521}
5522/** @} */
5523
5524
5525/** @name Memory access.
5526 *
5527 * @{
5528 */
5529
5530#undef LOG_GROUP
5531#define LOG_GROUP LOG_GROUP_IEM_MEM
5532
5533/**
5534 * Updates the IEMCPU::cbWritten counter if applicable.
5535 *
5536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5537 * @param fAccess The access being accounted for.
5538 * @param cbMem The access size.
5539 */
5540DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5541{
5542 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5543 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5544 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5545}
5546
5547
5548/**
5549 * Applies the segment limit, base and attributes.
5550 *
5551 * This may raise a \#GP or \#SS.
5552 *
5553 * @returns VBox strict status code.
5554 *
5555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5556 * @param fAccess The kind of access which is being performed.
5557 * @param iSegReg The index of the segment register to apply.
5558 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5559 * TSS, ++).
5560 * @param cbMem The access size.
5561 * @param pGCPtrMem Pointer to the guest memory address to apply
5562 * segmentation to. Input and output parameter.
5563 */
5564VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5565{
5566 if (iSegReg == UINT8_MAX)
5567 return VINF_SUCCESS;
5568
5569 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5570 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5571 switch (IEM_GET_CPU_MODE(pVCpu))
5572 {
5573 case IEMMODE_16BIT:
5574 case IEMMODE_32BIT:
5575 {
5576 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5577 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5578
5579 if ( pSel->Attr.n.u1Present
5580 && !pSel->Attr.n.u1Unusable)
5581 {
5582 Assert(pSel->Attr.n.u1DescType);
5583 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5584 {
5585 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5586 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5587 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5588
5589 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5590 {
5591 /** @todo CPL check. */
5592 }
5593
5594 /*
5595 * There are two kinds of data selectors, normal and expand down.
5596 */
5597 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5598 {
5599 if ( GCPtrFirst32 > pSel->u32Limit
5600 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5601 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5602 }
5603 else
5604 {
5605 /*
5606 * The upper boundary is defined by the B bit, not the G bit!
5607 */
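                    /* For an expand-down segment the valid offsets are limit+1 up to 0xffff,
                       or up to 0xffffffff when the B bit is set. */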
5608 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5609 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5610 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5611 }
5612 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5613 }
5614 else
5615 {
5616 /*
5617                 * A code selector can usually be used to read through; writing is
5618                 * only permitted in real and V8086 mode.
5619 */
5620 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5621 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5622 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5623 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5624 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5625
5626 if ( GCPtrFirst32 > pSel->u32Limit
5627 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5628 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5629
5630 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5631 {
5632 /** @todo CPL check. */
5633 }
5634
5635 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5636 }
5637 }
5638 else
5639 return iemRaiseGeneralProtectionFault0(pVCpu);
5640 return VINF_SUCCESS;
5641 }
5642
5643 case IEMMODE_64BIT:
5644 {
5645 RTGCPTR GCPtrMem = *pGCPtrMem;
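            /* In 64-bit mode only FS and GS contribute a segment base; the other
               segment bases are treated as zero. */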
5646 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5647 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5648
5649 Assert(cbMem >= 1);
5650 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5651 return VINF_SUCCESS;
5652 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5653 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5654 return iemRaiseGeneralProtectionFault0(pVCpu);
5655 }
5656
5657 default:
5658 AssertFailedReturn(VERR_IEM_IPE_7);
5659 }
5660}
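/* Typical call pattern (a sketch for illustration only; IEM_ACCESS_DATA_R is assumed here to be
   the read+data access flag combination): the caller passes the effective address in and gets
   the segmented (linear) address back on success:
        RTGCPTR GCPtrMem = GCPtrEff;
        VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
                                                   sizeof(uint32_t), &GCPtrMem);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
*/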
5661
5662
5663/**
5664 * Translates a virtual address to a physical address and checks if we
5665 * can access the page as specified.
5666 *
5667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5668 * @param GCPtrMem The virtual address.
5669 * @param cbAccess The access size, for raising \#PF correctly for
5670 * FXSAVE and such.
5671 * @param fAccess The intended access.
5672 * @param pGCPhysMem Where to return the physical address.
5673 */
5674VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5675 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5676{
5677 /** @todo Need a different PGM interface here. We're currently using
5678 * generic / REM interfaces. this won't cut it for R0. */
5679 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5680 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5681 * here. */
5682 PGMPTWALK Walk;
5683 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5684 if (RT_FAILURE(rc))
5685 {
5686 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5687 /** @todo Check unassigned memory in unpaged mode. */
5688 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5689#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5690 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5691 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5692#endif
5693 *pGCPhysMem = NIL_RTGCPHYS;
5694 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5695 }
5696
5697    /* If the page is writable, user-accessible and does not have the no-exec bit
5698       set, all access is allowed. Otherwise we'll have to check more carefully... */
5699 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5700 {
5701 /* Write to read only memory? */
5702 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5703 && !(Walk.fEffective & X86_PTE_RW)
5704 && ( ( IEM_GET_CPL(pVCpu) == 3
5705 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5706 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5707 {
5708 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5709 *pGCPhysMem = NIL_RTGCPHYS;
5710#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5711 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5712 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5713#endif
5714 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5715 }
5716
5717 /* Kernel memory accessed by userland? */
5718 if ( !(Walk.fEffective & X86_PTE_US)
5719 && IEM_GET_CPL(pVCpu) == 3
5720 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5721 {
5722 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5723 *pGCPhysMem = NIL_RTGCPHYS;
5724#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5725 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5726 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5727#endif
5728 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5729 }
5730
5731 /* Executing non-executable memory? */
5732 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5733 && (Walk.fEffective & X86_PTE_PAE_NX)
5734 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5735 {
5736 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5737 *pGCPhysMem = NIL_RTGCPHYS;
5738#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5739 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5740 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5741#endif
5742 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5743 VERR_ACCESS_DENIED);
5744 }
5745 }
5746
5747 /*
5748 * Set the dirty / access flags.
5749     * ASSUMES this is set when the address is translated rather than on commit...
5750 */
5751 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5752 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5753 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5754 {
5755 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5756 AssertRC(rc2);
5757 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5758 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5759 }
5760
5761 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5762 *pGCPhysMem = GCPhys;
5763 return VINF_SUCCESS;
5764}
5765
5766#if 0 /*unused*/
5767/**
5768 * Looks up a memory mapping entry.
5769 *
5770 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5772 * @param pvMem The memory address.
5773 * @param fAccess The access to.
5774 */
5775DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5776{
5777 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5778 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5779 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5780 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5781 return 0;
5782 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5783 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5784 return 1;
5785 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5786 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5787 return 2;
5788 return VERR_NOT_FOUND;
5789}
5790#endif
5791
5792/**
5793 * Finds a free memmap entry when using iNextMapping doesn't work.
5794 *
5795 * @returns Memory mapping index, 1024 on failure.
5796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5797 */
5798static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5799{
5800 /*
5801 * The easy case.
5802 */
5803 if (pVCpu->iem.s.cActiveMappings == 0)
5804 {
5805 pVCpu->iem.s.iNextMapping = 1;
5806 return 0;
5807 }
5808
5809 /* There should be enough mappings for all instructions. */
5810 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5811
5812 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5813 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5814 return i;
5815
5816 AssertFailedReturn(1024);
5817}
5818
5819
5820/**
5821 * Commits a bounce buffer that needs writing back and unmaps it.
5822 *
5823 * @returns Strict VBox status code.
5824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5825 * @param iMemMap The index of the buffer to commit.
5826 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5827 * Always false in ring-3, obviously.
5828 */
5829static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5830{
5831 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5832 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5833#ifdef IN_RING3
5834 Assert(!fPostponeFail);
5835 RT_NOREF_PV(fPostponeFail);
5836#endif
5837
5838 /*
5839 * Do the writing.
5840 */
5841 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5842 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5843 {
5844 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5845 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5846 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5847 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5848 {
5849 /*
5850 * Carefully and efficiently dealing with access handler return
5851 * codes make this a little bloated.
5852             * codes makes this a little bloated.
5853 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5855 pbBuf,
5856 cbFirst,
5857 PGMACCESSORIGIN_IEM);
5858 if (rcStrict == VINF_SUCCESS)
5859 {
5860 if (cbSecond)
5861 {
5862 rcStrict = PGMPhysWrite(pVM,
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5864 pbBuf + cbFirst,
5865 cbSecond,
5866 PGMACCESSORIGIN_IEM);
5867 if (rcStrict == VINF_SUCCESS)
5868 { /* nothing */ }
5869 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5870 {
5871 LogEx(LOG_GROUP_IEM,
5872 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5875 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5876 }
5877#ifndef IN_RING3
5878 else if (fPostponeFail)
5879 {
5880 LogEx(LOG_GROUP_IEM,
5881 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5884 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5885 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5886 return iemSetPassUpStatus(pVCpu, rcStrict);
5887 }
5888#endif
5889 else
5890 {
5891 LogEx(LOG_GROUP_IEM,
5892 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5893 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5895 return rcStrict;
5896 }
5897 }
5898 }
5899 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5900 {
5901 if (!cbSecond)
5902 {
5903 LogEx(LOG_GROUP_IEM,
5904 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5906 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5907 }
5908 else
5909 {
5910 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5912 pbBuf + cbFirst,
5913 cbSecond,
5914 PGMACCESSORIGIN_IEM);
5915 if (rcStrict2 == VINF_SUCCESS)
5916 {
5917 LogEx(LOG_GROUP_IEM,
5918 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5919 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5921 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5922 }
5923 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5924 {
5925 LogEx(LOG_GROUP_IEM,
5926 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5928 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5929 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5930 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5931 }
5932#ifndef IN_RING3
5933 else if (fPostponeFail)
5934 {
5935 LogEx(LOG_GROUP_IEM,
5936 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5937 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5938 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5939 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5940 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5941 return iemSetPassUpStatus(pVCpu, rcStrict);
5942 }
5943#endif
5944 else
5945 {
5946 LogEx(LOG_GROUP_IEM,
5947 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5948 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5949 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5950 return rcStrict2;
5951 }
5952 }
5953 }
5954#ifndef IN_RING3
5955 else if (fPostponeFail)
5956 {
5957 LogEx(LOG_GROUP_IEM,
5958 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5959 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5960 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5961 if (!cbSecond)
5962 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5963 else
5964 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5965 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5966 return iemSetPassUpStatus(pVCpu, rcStrict);
5967 }
5968#endif
5969 else
5970 {
5971 LogEx(LOG_GROUP_IEM,
5972 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5973 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5974 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5975 return rcStrict;
5976 }
5977 }
5978 else
5979 {
5980 /*
5981 * No access handlers, much simpler.
5982 */
5983 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5984 if (RT_SUCCESS(rc))
5985 {
5986 if (cbSecond)
5987 {
5988 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5989 if (RT_SUCCESS(rc))
5990 { /* likely */ }
5991 else
5992 {
5993 LogEx(LOG_GROUP_IEM,
5994 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5995 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5996 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5997 return rc;
5998 }
5999 }
6000 }
6001 else
6002 {
6003 LogEx(LOG_GROUP_IEM,
6004 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6005 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6006 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6007 return rc;
6008 }
6009 }
6010 }
6011
6012#if defined(IEM_LOG_MEMORY_WRITES)
6013 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6014 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6015 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6016 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6017 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6018 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6019
6020 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6021 g_cbIemWrote = cbWrote;
6022 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6023#endif
6024
6025 /*
6026 * Free the mapping entry.
6027 */
6028 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6029 Assert(pVCpu->iem.s.cActiveMappings != 0);
6030 pVCpu->iem.s.cActiveMappings--;
6031 return VINF_SUCCESS;
6032}
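
/*
 * Illustrative sketch, not built: the status-merging pattern the function above
 * repeats for the two PGMPhysWrite calls, condensed into one helper.  All macros
 * and functions named are the ones used above; the wrapper name
 * iemExampleMergeWriteStatus is made up for illustration only.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleMergeWriteStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcFirst, VBOXSTRICTRC rcSecond)
{
    if (rcFirst == VINF_SUCCESS && rcSecond == VINF_SUCCESS)
        return VINF_SUCCESS;                                    /* the common, uninteresting case */
    if (PGM_PHYS_RW_IS_SUCCESS(rcFirst) && PGM_PHYS_RW_IS_SUCCESS(rcSecond))
    {
        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcFirst, rcSecond);     /* keep the more important informational status */
        return iemSetPassUpStatus(pVCpu, rcFirst);              /* pass it up once the instruction has retired */
    }
    return RT_FAILURE(VBOXSTRICTRC_VAL(rcFirst)) ? rcFirst : rcSecond; /* a hard failure wins outright */
}
#endif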
6033
6034
6035/**
6036 * iemMemMap worker that deals with a request crossing pages.
6037 */
6038static VBOXSTRICTRC
6039iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6040 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6041{
6042 Assert(cbMem <= GUEST_PAGE_SIZE);
6043
6044 /*
6045 * Do the address translations.
6046 */
6047 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6048 RTGCPHYS GCPhysFirst;
6049 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6050 if (rcStrict != VINF_SUCCESS)
6051 return rcStrict;
6052 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6053
6054 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6055 RTGCPHYS GCPhysSecond;
6056 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6057 cbSecondPage, fAccess, &GCPhysSecond);
6058 if (rcStrict != VINF_SUCCESS)
6059 return rcStrict;
6060 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6061 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6062
6063 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6064
6065 /*
6066 * Read in the current memory content if it's a read, execute or partial
6067 * write access.
6068 */
6069 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6070
6071 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6072 {
6073 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6074 {
6075 /*
6076 * Must carefully deal with access handler status codes here,
6077 * which makes the code a bit bloated.
6078 */
6079 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6080 if (rcStrict == VINF_SUCCESS)
6081 {
6082 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6083 if (rcStrict == VINF_SUCCESS)
6084 { /*likely */ }
6085 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6086 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6087 else
6088 {
6089 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6090 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6091 return rcStrict;
6092 }
6093 }
6094 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6095 {
6096 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6097 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6098 {
6099 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6100 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6101 }
6102 else
6103 {
6104 LogEx(LOG_GROUP_IEM,
6105 ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6106 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6107 return rcStrict2;
6108 }
6109 }
6110 else
6111 {
6112 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6113 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6114 return rcStrict;
6115 }
6116 }
6117 else
6118 {
6119 /*
6120 * No informational status codes here, much more straightforward.
6121 */
6122 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6123 if (RT_SUCCESS(rc))
6124 {
6125 Assert(rc == VINF_SUCCESS);
6126 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6127 if (RT_SUCCESS(rc))
6128 Assert(rc == VINF_SUCCESS);
6129 else
6130 {
6131 LogEx(LOG_GROUP_IEM,
6132 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6133 return rc;
6134 }
6135 }
6136 else
6137 {
6138 LogEx(LOG_GROUP_IEM,
6139 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6140 return rc;
6141 }
6142 }
6143 }
6144#ifdef VBOX_STRICT
6145 else
6146 memset(pbBuf, 0xcc, cbMem);
6147 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6148 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6149#endif
6150 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6151
6152 /*
6153 * Commit the bounce buffer entry.
6154 */
6155 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6156 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6160 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6161 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6162 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6163 pVCpu->iem.s.cActiveMappings++;
6164
6165 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6166 *ppvMem = pbBuf;
6167 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6168 return VINF_SUCCESS;
6169}
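
/*
 * Illustrative sketch, not built: the page-split arithmetic used at the top of the
 * function above, in isolation.  With GCPtrFirst = 0x1ffd and cbMem = 8 this gives
 * cbFirstPage = 3 and cbSecondPage = 5 (assuming 4 KiB guest pages).  The helper
 * name is made up for illustration; the macros are the ones used above.
 */
#if 0 /* example only */
static void iemExampleSplitAcrossPages(RTGCPTR GCPtrFirst, size_t cbMem,
                                       uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    /* Bytes left on the page containing GCPtrFirst: */
    uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    *pcbFirstPage  = cbFirstPage;
    /* Remainder landing on the following page; only meaningful when the access
       actually crosses, i.e. cbMem > cbFirstPage (the only case the caller handles): */
    *pcbSecondPage = (uint32_t)cbMem - cbFirstPage;
}
#endif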
6170
6171
6172/**
6173 * iemMemMap worker that deals with iemMemPageMap failures.
6174 */
6175static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6176 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6177{
6178 /*
6179 * Filter out conditions we can handle and the ones which shouldn't happen.
6180 */
6181 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6182 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6183 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6184 {
6185 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6186 return rcMap;
6187 }
6188 pVCpu->iem.s.cPotentialExits++;
6189
6190 /*
6191 * Read in the current memory content if it's a read, execute or partial
6192 * write access.
6193 */
6194 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6195 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6196 {
6197 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6198 memset(pbBuf, 0xff, cbMem);
6199 else
6200 {
6201 int rc;
6202 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6203 {
6204 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6205 if (rcStrict == VINF_SUCCESS)
6206 { /* nothing */ }
6207 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6208 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6209 else
6210 {
6211 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6212 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6213 return rcStrict;
6214 }
6215 }
6216 else
6217 {
6218 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6219 if (RT_SUCCESS(rc))
6220 { /* likely */ }
6221 else
6222 {
6223 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6224 GCPhysFirst, rc));
6225 return rc;
6226 }
6227 }
6228 }
6229 }
6230#ifdef VBOX_STRICT
6231 else
6232 memset(pbBuf, 0xcc, cbMem);
6233#endif
6234#ifdef VBOX_STRICT
6235 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6236 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6237#endif
6238
6239 /*
6240 * Commit the bounce buffer entry.
6241 */
6242 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6243 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6244 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6245 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6246 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6247 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6248 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6249 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6250 pVCpu->iem.s.cActiveMappings++;
6251
6252 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6253 *ppvMem = pbBuf;
6254 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6255 return VINF_SUCCESS;
6256}
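
/*
 * Illustrative sketch, not built: how the bUnmapInfo byte produced by the mapping
 * functions above is encoded and later decoded by iemMemCommitAndUnmap and friends.
 * The bit layout is the one used throughout this file (bits 0-2: mapping index,
 * bit 3: validity marker, bits 4-7: IEM_ACCESS_TYPE_XXX of the mapping); the two
 * helper names are made up for illustration only.
 */
#if 0 /* example only */
static uint8_t iemExampleEncodeUnmapInfo(unsigned iMemMap, uint32_t fAccess)
{
    return (uint8_t)(iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4));
}

static void iemExampleDecodeUnmapInfo(uint8_t bUnmapInfo, uintptr_t *piMemMap, unsigned *pfAccessType)
{
    *piMemMap     = bUnmapInfo & 0x7;           /* index into aMemMappings[] / aMemBbMappings[] */
    *pfAccessType = (unsigned)bUnmapInfo >> 4;  /* checked against fAccess & (IEM_ACCESS_TYPE_MASK | 0xf) */
}
#endif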
6257
6258
6259
6260/**
6261 * Maps the specified guest memory for the given kind of access.
6262 *
6263 * This may be using bounce buffering of the memory if it's crossing a page
6264 * boundary or if there is an access handler installed for any of it. Because
6265 * of lock prefix guarantees, we're in for some extra clutter when this
6266 * happens.
6267 *
6268 * This may raise a \#GP, \#SS, \#PF or \#AC.
6269 *
6270 * @returns VBox strict status code.
6271 *
6272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6273 * @param ppvMem Where to return the pointer to the mapped memory.
6274 * @param pbUnmapInfo Where to return unmap info to be passed to
6275 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6276 * done.
6277 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6278 * 8, 12, 16, 32 or 512. When used by string operations
6279 * it can be up to a page.
6280 * @param iSegReg The index of the segment register to use for this
6281 * access. The base and limits are checked. Use UINT8_MAX
6282 * to indicate that no segmentation is required (for IDT,
6283 * GDT and LDT accesses).
6284 * @param GCPtrMem The address of the guest memory.
6285 * @param fAccess How the memory is being accessed. The
6286 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6287 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6288 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6289 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6290 * set.
6291 * @param uAlignCtl Alignment control:
6292 * - Bits 15:0 is the alignment mask.
6293 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6294 * IEM_MEMMAP_F_ALIGN_SSE, and
6295 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6296 * Pass zero to skip alignment.
6297 */
6298VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6299 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6300{
6301 /*
6302 * Check the input and figure out which mapping entry to use.
6303 */
6304 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6305 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6306 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6307 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6308 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6309
6310 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6311 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6312 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6313 {
6314 iMemMap = iemMemMapFindFree(pVCpu);
6315 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6316 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6317 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6318 pVCpu->iem.s.aMemMappings[2].fAccess),
6319 VERR_IEM_IPE_9);
6320 }
6321
6322 /*
6323 * Map the memory, checking that we can actually access it. If something
6324 * slightly complicated happens, fall back on bounce buffering.
6325 */
6326 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6327 if (rcStrict == VINF_SUCCESS)
6328 { /* likely */ }
6329 else
6330 return rcStrict;
6331
6332 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6333 { /* likely */ }
6334 else
6335 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6336
6337 /*
6338 * Alignment check.
6339 */
6340 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6341 { /* likelyish */ }
6342 else
6343 {
6344 /* Misaligned access. */
6345 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6346 {
6347 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6348 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6349 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6350 {
6351 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6352
6353 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6354 return iemRaiseAlignmentCheckException(pVCpu);
6355 }
6356 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6357 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6358 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6359 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6360 * that's what FXSAVE does on a 10980xe. */
6361 && iemMemAreAlignmentChecksEnabled(pVCpu))
6362 return iemRaiseAlignmentCheckException(pVCpu);
6363 else
6364 return iemRaiseGeneralProtectionFault0(pVCpu);
6365 }
6366
6367#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6368 /* If the access is atomic there are host platform alignment restrictions
6369 we need to conform with. */
6370 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6371# if defined(RT_ARCH_AMD64)
6372 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6373# elif defined(RT_ARCH_ARM64)
6374 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6375# else
6376# error port me
6377# endif
6378 )
6379 { /* okay */ }
6380 else
6381 {
6382 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6383 pVCpu->iem.s.cMisalignedAtomics += 1;
6384 return VINF_EM_EMULATE_SPLIT_LOCK;
6385 }
6386#endif
6387 }
6388
6389#ifdef IEM_WITH_DATA_TLB
6390 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6391
6392 /*
6393 * Get the TLB entry for this page.
6394 */
6395 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6396 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6397 if (pTlbe->uTag == uTag)
6398 {
6399# ifdef VBOX_WITH_STATISTICS
6400 pVCpu->iem.s.DataTlb.cTlbHits++;
6401# endif
6402 }
6403 else
6404 {
6405 pVCpu->iem.s.DataTlb.cTlbMisses++;
6406 PGMPTWALK Walk;
6407 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6408 if (RT_FAILURE(rc))
6409 {
6410 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6411# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6412 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6413 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6414# endif
6415 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6416 }
6417
6418 Assert(Walk.fSucceeded);
6419 pTlbe->uTag = uTag;
6420 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6421 pTlbe->GCPhys = Walk.GCPhys;
6422 pTlbe->pbMappingR3 = NULL;
6423 }
6424
6425 /*
6426 * Check TLB page table level access flags.
6427 */
6428 /* If the page is either supervisor only or non-writable, we need to do
6429 more careful access checks. */
6430 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6431 {
6432 /* Write to read only memory? */
6433 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6434 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6435 && ( ( IEM_GET_CPL(pVCpu) == 3
6436 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6437 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6438 {
6439 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6440# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6441 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6442 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6443# endif
6444 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6445 }
6446
6447 /* Kernel memory accessed by userland? */
6448 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6449 && IEM_GET_CPL(pVCpu) == 3
6450 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6451 {
6452 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6453# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6454 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6455 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6456# endif
6457 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6458 }
6459 }
6460
6461 /*
6462 * Set the dirty / access flags.
6463 * ASSUMES this is set when the address is translated rather than on commit...
6464 */
6465 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6466 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6467 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6468 {
6469 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6470 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6471 AssertRC(rc2);
6472 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6473 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6474 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6475 }
6476
6477 /*
6478 * Look up the physical page info if necessary.
6479 */
6480 uint8_t *pbMem = NULL;
6481 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6482# ifdef IN_RING3
6483 pbMem = pTlbe->pbMappingR3;
6484# else
6485 pbMem = NULL;
6486# endif
6487 else
6488 {
6489 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6490 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6491 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6492 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6493 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6494 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6495 { /* likely */ }
6496 else
6497 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6498 pTlbe->pbMappingR3 = NULL;
6499 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6500 | IEMTLBE_F_NO_MAPPINGR3
6501 | IEMTLBE_F_PG_NO_READ
6502 | IEMTLBE_F_PG_NO_WRITE
6503 | IEMTLBE_F_PG_UNASSIGNED
6504 | IEMTLBE_F_PG_CODE_PAGE);
6505 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6506 &pbMem, &pTlbe->fFlagsAndPhysRev);
6507 AssertRCReturn(rc, rc);
6508# ifdef IN_RING3
6509 pTlbe->pbMappingR3 = pbMem;
6510# endif
6511 }
6512
6513 /*
6514 * Check the physical page level access and mapping.
6515 */
6516 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6517 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6518 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6519 { /* probably likely */ }
6520 else
6521 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6522 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6523 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6524 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6525 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6526 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6527
6528 if (pbMem)
6529 {
6530 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6531 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6532 fAccess |= IEM_ACCESS_NOT_LOCKED;
6533 }
6534 else
6535 {
6536 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6537 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6538 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6539 if (rcStrict != VINF_SUCCESS)
6540 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6541 }
6542
6543 void * const pvMem = pbMem;
6544
6545 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6546 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6547 if (fAccess & IEM_ACCESS_TYPE_READ)
6548 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6549
6550#else /* !IEM_WITH_DATA_TLB */
6551
6552 RTGCPHYS GCPhysFirst;
6553 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6554 if (rcStrict != VINF_SUCCESS)
6555 return rcStrict;
6556
6557 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6558 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6559 if (fAccess & IEM_ACCESS_TYPE_READ)
6560 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6561
6562 void *pvMem;
6563 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6564 if (rcStrict != VINF_SUCCESS)
6565 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6566
6567#endif /* !IEM_WITH_DATA_TLB */
6568
6569 /*
6570 * Fill in the mapping table entry.
6571 */
6572 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6574 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6575 pVCpu->iem.s.cActiveMappings += 1;
6576
6577 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6578 *ppvMem = pvMem;
6579 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6580 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6581 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6582
6583 return VINF_SUCCESS;
6584}
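
/*
 * Illustrative sketch, not built: a few uAlignCtl values as composed by callers of
 * iemMemMap/iemMemMapJmp in this file, to make the encoding documented above
 * concrete (bits 15:0 = alignment mask, upper bits = IEM_MEMMAP_F_ALIGN_XXX flags).
 */
#if 0 /* example only */
/* Naturally aligned dword; misalignment can only yield #AC (when enabled), never #GP: */
uint32_t const uAlignCtlDword = sizeof(uint32_t) - 1;
/* 16-byte SSE operand; misalignment raises #GP(0), or falls back to the #AC rules
   when IEM_MEMMAP_F_ALIGN_SSE applies and MXCSR.MM is set: */
uint32_t const uAlignCtlSse   = (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
/* No alignment checking at all: */
uint32_t const uAlignCtlNone  = 0;
#endif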
6585
6586
6587/**
6588 * Commits the guest memory if bounce buffered and unmaps it.
6589 *
6590 * @returns Strict VBox status code.
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param bUnmapInfo Unmap info set by iemMemMap.
6593 */
6594VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6595{
6596 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6597 AssertMsgReturn( (bUnmapInfo & 0x08)
6598 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6599 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6600 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6601 VERR_NOT_FOUND);
6602
6603 /* If it's bounce buffered, we may need to write back the buffer. */
6604 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6605 {
6606 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6607 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6608 }
6609 /* Otherwise unlock it. */
6610 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6611 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6612
6613 /* Free the entry. */
6614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6615 Assert(pVCpu->iem.s.cActiveMappings != 0);
6616 pVCpu->iem.s.cActiveMappings--;
6617 return VINF_SUCCESS;
6618}
6619
6620
6621/**
6622 * Rolls back the guest memory (conceptually only) and unmaps it.
6623 *
6624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6625 * @param bUnmapInfo Unmap info set by iemMemMap.
6626 */
6627void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6628{
6629 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6630 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6631 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6632 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6633 == ((unsigned)bUnmapInfo >> 4),
6634 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6635
6636 /* Unlock it if necessary. */
6637 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6638 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6639
6640 /* Free the entry. */
6641 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6642 Assert(pVCpu->iem.s.cActiveMappings != 0);
6643 pVCpu->iem.s.cActiveMappings--;
6644}
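
/*
 * Illustrative sketch, not built: the typical commit-versus-rollback decision as
 * made by instruction emulation code using the helpers above.  The wrapper name is
 * made up, IEM_ACCESS_DATA_W is assumed to be defined alongside the
 * IEM_ACCESS_DATA_R used elsewhere in this file, and the UINT32_MAX test merely
 * stands in for whatever late check might still fail after mapping.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleConditionalStoreU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint8_t   bUnmapInfo;
    uint32_t *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    if (u32Value != UINT32_MAX)                             /* late check that may still reject the store */
    {
        *pu32Dst = u32Value;
        return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);     /* retire: write back bounce buffer / unlock page */
    }
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);              /* abandon: a bounce buffered write never hits memory */
    return iemRaiseGeneralProtectionFault0(pVCpu);          /* raise helpers return a strict status to propagate */
}
#endif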
6645
6646#ifdef IEM_WITH_SETJMP
6647
6648/**
6649 * Maps the specified guest memory for the given kind of access, longjmp on
6650 * error.
6651 *
6652 * This may be using bounce buffering of the memory if it's crossing a page
6653 * boundary or if there is an access handler installed for any of it. Because
6654 * of lock prefix guarantees, we're in for some extra clutter when this
6655 * happens.
6656 *
6657 * This may raise a \#GP, \#SS, \#PF or \#AC.
6658 *
6659 * @returns Pointer to the mapped memory.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 * @param pbUnmapInfo Where to return unmap info to be passed to
6663 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6664 * iemMemCommitAndUnmapWoSafeJmp,
6665 * iemMemCommitAndUnmapRoSafeJmp,
6666 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6667 * when done.
6668 * @param cbMem The number of bytes to map. This is usually 1,
6669 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6670 * string operations it can be up to a page.
6671 * @param iSegReg The index of the segment register to use for
6672 * this access. The base and limits are checked.
6673 * Use UINT8_MAX to indicate that no segmentation
6674 * is required (for IDT, GDT and LDT accesses).
6675 * @param GCPtrMem The address of the guest memory.
6676 * @param fAccess How the memory is being accessed. The
6677 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6678 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6679 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6680 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6681 * set.
6682 * @param uAlignCtl Alignment control:
6683 * - Bits 15:0 is the alignment mask.
6684 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6685 * IEM_MEMMAP_F_ALIGN_SSE, and
6686 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6687 * Pass zero to skip alignment.
6688 */
6689void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6690 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6691{
6692 /*
6693 * Check the input, check segment access and adjust address
6694 * with segment base.
6695 */
6696 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6697 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6698 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6699
6700 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6701 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6702 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6703
6704 /*
6705 * Alignment check.
6706 */
6707 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6708 { /* likelyish */ }
6709 else
6710 {
6711 /* Misaligned access. */
6712 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6713 {
6714 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6715 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6716 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6717 {
6718 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6719
6720 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6721 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6722 }
6723 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6724 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6725 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6726 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6727 * that's what FXSAVE does on a 10980xe. */
6728 && iemMemAreAlignmentChecksEnabled(pVCpu))
6729 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6730 else
6731 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6732 }
6733
6734#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6735 /* If the access is atomic there are host platform alignment restrictions
6736 we need to conform with. */
6737 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6738# if defined(RT_ARCH_AMD64)
6739 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6740# elif defined(RT_ARCH_ARM64)
6741 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6742# else
6743# error port me
6744# endif
6745 )
6746 { /* okay */ }
6747 else
6748 {
6749 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6750 pVCpu->iem.s.cMisalignedAtomics += 1;
6751 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6752 }
6753#endif
6754 }
6755
6756 /*
6757 * Figure out which mapping entry to use.
6758 */
6759 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6760 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6761 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6762 {
6763 iMemMap = iemMemMapFindFree(pVCpu);
6764 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6765 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6766 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6767 pVCpu->iem.s.aMemMappings[2].fAccess),
6768 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6769 }
6770
6771 /*
6772 * Crossing a page boundary?
6773 */
6774 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6775 { /* No (likely). */ }
6776 else
6777 {
6778 void *pvMem;
6779 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6780 if (rcStrict == VINF_SUCCESS)
6781 return pvMem;
6782 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6783 }
6784
6785#ifdef IEM_WITH_DATA_TLB
6786 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6787
6788 /*
6789 * Get the TLB entry for this page.
6790 */
6791 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6792 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6793 if (pTlbe->uTag == uTag)
6794 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6795 else
6796 {
6797 pVCpu->iem.s.DataTlb.cTlbMisses++;
6798 PGMPTWALK Walk;
6799 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6800 if (RT_FAILURE(rc))
6801 {
6802 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6803# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6804 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6805 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6806# endif
6807 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6808 }
6809
6810 Assert(Walk.fSucceeded);
6811 pTlbe->uTag = uTag;
6812 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6813 pTlbe->GCPhys = Walk.GCPhys;
6814 pTlbe->pbMappingR3 = NULL;
6815 }
6816
6817 /*
6818 * Check the flags and physical revision.
6819 */
6820 /** @todo make the caller pass these in with fAccess. */
6821 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6822 ? IEMTLBE_F_PT_NO_USER : 0;
6823 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6824 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6825 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6826 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6827 ? IEMTLBE_F_PT_NO_WRITE : 0)
6828 : 0;
6829 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6830 uint8_t *pbMem = NULL;
6831 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6832 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6833# ifdef IN_RING3
6834 pbMem = pTlbe->pbMappingR3;
6835# else
6836 pbMem = NULL;
6837# endif
6838 else
6839 {
6840 /*
6841 * Okay, something isn't quite right or needs refreshing.
6842 */
6843 /* Write to read only memory? */
6844 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6845 {
6846 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6847# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6848 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6849 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6850# endif
6851 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6852 }
6853
6854 /* Kernel memory accessed by userland? */
6855 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6856 {
6857 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6858# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6859 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6860 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6861# endif
6862 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6863 }
6864
6865 /* Set the dirty / access flags.
6866 ASSUMES this is set when the address is translated rather than on commit... */
6867 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6868 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6869 {
6870 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6871 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6872 AssertRC(rc2);
6873 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6874 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6875 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6876 }
6877
6878 /*
6879 * Check if the physical page info needs updating.
6880 */
6881 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6882# ifdef IN_RING3
6883 pbMem = pTlbe->pbMappingR3;
6884# else
6885 pbMem = NULL;
6886# endif
6887 else
6888 {
6889 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6890 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6891 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6892 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6893 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6894 pTlbe->pbMappingR3 = NULL;
6895 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6896 | IEMTLBE_F_NO_MAPPINGR3
6897 | IEMTLBE_F_PG_NO_READ
6898 | IEMTLBE_F_PG_NO_WRITE
6899 | IEMTLBE_F_PG_UNASSIGNED
6900 | IEMTLBE_F_PG_CODE_PAGE);
6901 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6902 &pbMem, &pTlbe->fFlagsAndPhysRev);
6903 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6904# ifdef IN_RING3
6905 pTlbe->pbMappingR3 = pbMem;
6906# endif
6907 }
6908
6909 /*
6910 * Check the physical page level access and mapping.
6911 */
6912 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6913 { /* probably likely */ }
6914 else
6915 {
6916 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6917 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6918 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6919 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6920 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6921 if (rcStrict == VINF_SUCCESS)
6922 return pbMem;
6923 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6924 }
6925 }
6926 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6927
6928 if (pbMem)
6929 {
6930 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6931 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6932 fAccess |= IEM_ACCESS_NOT_LOCKED;
6933 }
6934 else
6935 {
6936 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6937 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6938 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6939 if (rcStrict == VINF_SUCCESS)
6940 {
6941 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6942 return pbMem;
6943 }
6944 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6945 }
6946
6947 void * const pvMem = pbMem;
6948
6949 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6950 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6951 if (fAccess & IEM_ACCESS_TYPE_READ)
6952 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6953
6954#else /* !IEM_WITH_DATA_TLB */
6955
6956
6957 RTGCPHYS GCPhysFirst;
6958 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6959 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6960 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6961
6962 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6963 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6964 if (fAccess & IEM_ACCESS_TYPE_READ)
6965 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6966
6967 void *pvMem;
6968 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6969 if (rcStrict == VINF_SUCCESS)
6970 { /* likely */ }
6971 else
6972 {
6973 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6974 if (rcStrict == VINF_SUCCESS)
6975 return pvMem;
6976 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6977 }
6978
6979#endif /* !IEM_WITH_DATA_TLB */
6980
6981 /*
6982 * Fill in the mapping table entry.
6983 */
6984 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6985 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6986 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6987 pVCpu->iem.s.cActiveMappings++;
6988
6989 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6990
6991 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6992 return pvMem;
6993}
6994
6995
6996/**
6997 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6998 *
6999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7000 * @param bUnmapInfo Unmap info set by iemMemMapJmp.
7002 */
7003void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7004{
7005 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7006 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7007 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7008 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7009 == ((unsigned)bUnmapInfo >> 4),
7010 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7011
7012 /* If it's bounce buffered, we may need to write back the buffer. */
7013 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7014 {
7015 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7016 {
7017 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7018 if (rcStrict == VINF_SUCCESS)
7019 return;
7020 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7021 }
7022 }
7023 /* Otherwise unlock it. */
7024 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7025 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7026
7027 /* Free the entry. */
7028 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7029 Assert(pVCpu->iem.s.cActiveMappings != 0);
7030 pVCpu->iem.s.cActiveMappings--;
7031}
7032
7033
7034/** Fallback for iemMemCommitAndUnmapRwJmp. */
7035void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7036{
7037 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7038 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7039}
7040
7041
7042/** Fallback for iemMemCommitAndUnmapAtJmp. */
7043void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7044{
7045 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7046 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7047}
7048
7049
7050/** Fallback for iemMemCommitAndUnmapWoJmp. */
7051void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7052{
7053 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7054 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7055}
7056
7057
7058/** Fallback for iemMemCommitAndUnmapRoJmp. */
7059void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7060{
7061 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7062 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7063}
7064
7065
7066/** Fallback for iemMemRollbackAndUnmapWo. */
7067void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7068{
7069 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7070 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7071}
7072
7073#endif /* IEM_WITH_SETJMP */
7074
7075#ifndef IN_RING3
7076/**
7077 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7078 * buffer part runs into trouble, the write is postponed to ring-3 (sets FF and stuff).
7079 *
7080 * Allows the instruction to be completed and retired, while the IEM user will
7081 * return to ring-3 immediately afterwards and do the postponed writes there.
7082 *
7083 * @returns VBox status code (no strict statuses). Caller must check
7084 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7086 * @param bUnmapInfo Unmap info set by iemMemMap.
7088 */
7089VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7090{
7091 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7092 AssertMsgReturn( (bUnmapInfo & 0x08)
7093 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7094 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7095 == ((unsigned)bUnmapInfo >> 4),
7096 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7097 VERR_NOT_FOUND);
7098
7099 /* If it's bounce buffered, we may need to write back the buffer. */
7100 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7101 {
7102 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7103 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7104 }
7105 /* Otherwise unlock it. */
7106 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7107 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7108
7109 /* Free the entry. */
7110 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7111 Assert(pVCpu->iem.s.cActiveMappings != 0);
7112 pVCpu->iem.s.cActiveMappings--;
7113 return VINF_SUCCESS;
7114}
7115#endif
7116
7117
7118/**
7119 * Rolls back mappings, releasing page locks and such.
7120 *
7121 * The caller shall only call this after checking cActiveMappings.
7122 *
7123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7124 */
7125void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7126{
7127 Assert(pVCpu->iem.s.cActiveMappings > 0);
7128
7129 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7130 while (iMemMap-- > 0)
7131 {
7132 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7133 if (fAccess != IEM_ACCESS_INVALID)
7134 {
7135 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7136 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7137 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7138 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7139 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7140 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7141 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7142 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7143 pVCpu->iem.s.cActiveMappings--;
7144 }
7145 }
7146}
7147
7148
7149/*
7150 * Instantiate R/W templates.
7151 */
7152#define TMPL_MEM_WITH_STACK
7153
7154#define TMPL_MEM_TYPE uint8_t
7155#define TMPL_MEM_FN_SUFF U8
7156#define TMPL_MEM_FMT_TYPE "%#04x"
7157#define TMPL_MEM_FMT_DESC "byte"
7158#include "IEMAllMemRWTmpl.cpp.h"
7159
7160#define TMPL_MEM_TYPE uint16_t
7161#define TMPL_MEM_FN_SUFF U16
7162#define TMPL_MEM_FMT_TYPE "%#06x"
7163#define TMPL_MEM_FMT_DESC "word"
7164#include "IEMAllMemRWTmpl.cpp.h"
7165
7166#define TMPL_WITH_PUSH_SREG
7167#define TMPL_MEM_TYPE uint32_t
7168#define TMPL_MEM_FN_SUFF U32
7169#define TMPL_MEM_FMT_TYPE "%#010x"
7170#define TMPL_MEM_FMT_DESC "dword"
7171#include "IEMAllMemRWTmpl.cpp.h"
7172#undef TMPL_WITH_PUSH_SREG
7173
7174#define TMPL_MEM_TYPE uint64_t
7175#define TMPL_MEM_FN_SUFF U64
7176#define TMPL_MEM_FMT_TYPE "%#018RX64"
7177#define TMPL_MEM_FMT_DESC "qword"
7178#include "IEMAllMemRWTmpl.cpp.h"
7179
7180#undef TMPL_MEM_WITH_STACK
7181
7182#define TMPL_MEM_TYPE uint64_t
7183#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7184#define TMPL_MEM_FN_SUFF U64AlignedU128
7185#define TMPL_MEM_FMT_TYPE "%#018RX64"
7186#define TMPL_MEM_FMT_DESC "qword"
7187#include "IEMAllMemRWTmpl.cpp.h"
7188
7189/* See IEMAllMemRWTmplInline.cpp.h */
7190#define TMPL_MEM_BY_REF
7191
7192#define TMPL_MEM_TYPE RTFLOAT80U
7193#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7194#define TMPL_MEM_FN_SUFF R80
7195#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7196#define TMPL_MEM_FMT_DESC "tword"
7197#include "IEMAllMemRWTmpl.cpp.h"
7198
7199#define TMPL_MEM_TYPE RTPBCD80U
7200#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7201#define TMPL_MEM_FN_SUFF D80
7202#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7203#define TMPL_MEM_FMT_DESC "tword"
7204#include "IEMAllMemRWTmpl.cpp.h"
7205
7206#define TMPL_MEM_TYPE RTUINT128U
7207#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7208#define TMPL_MEM_FN_SUFF U128
7209#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7210#define TMPL_MEM_FMT_DESC "dqword"
7211#include "IEMAllMemRWTmpl.cpp.h"
7212
7213#define TMPL_MEM_TYPE RTUINT128U
7214#define TMPL_MEM_TYPE_ALIGN 0
7215#define TMPL_MEM_FN_SUFF U128NoAc
7216#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7217#define TMPL_MEM_FMT_DESC "dqword"
7218#include "IEMAllMemRWTmpl.cpp.h"
7219
7220/**
7221 * Fetches a data dword and zero extends it to a qword.
7222 *
7223 * @returns Strict VBox status code.
7224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7225 * @param pu64Dst Where to return the qword.
7226 * @param iSegReg The index of the segment register to use for
7227 * this access. The base and limits are checked.
7228 * @param GCPtrMem The address of the guest memory.
7229 */
7230VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7231{
7232 /* The lazy approach for now... */
7233 uint8_t bUnmapInfo;
7234 uint32_t const *pu32Src;
7235 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7236 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7237 if (rc == VINF_SUCCESS)
7238 {
7239 *pu64Dst = *pu32Src;
7240 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7241 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7242 }
7243 return rc;
7244}
7245
7246
7247#ifdef SOME_UNUSED_FUNCTION
7248/**
7249 * Fetches a data dword and sign extends it to a qword.
7250 *
7251 * @returns Strict VBox status code.
7252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7253 * @param pu64Dst Where to return the sign extended value.
7254 * @param iSegReg The index of the segment register to use for
7255 * this access. The base and limits are checked.
7256 * @param GCPtrMem The address of the guest memory.
7257 */
7258VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7259{
7260 /* The lazy approach for now... */
7261 uint8_t bUnmapInfo;
7262 int32_t const *pi32Src;
7263 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7264 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7265 if (rc == VINF_SUCCESS)
7266 {
7267 *pu64Dst = *pi32Src;
7268 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7269 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7270 }
7271#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7272 else
7273 *pu64Dst = 0;
7274#endif
7275 return rc;
7276}
7277#endif
7278
7279
7280/**
7281 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7282 * related.
7283 *
7284 * Raises \#GP(0) if not aligned.
7285 *
7286 * @returns Strict VBox status code.
7287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7288 * @param pu128Dst Where to return the dqword.
7289 * @param iSegReg The index of the segment register to use for
7290 * this access. The base and limits are checked.
7291 * @param GCPtrMem The address of the guest memory.
7292 */
7293VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7294{
7295 /* The lazy approach for now... */
7296 uint8_t bUnmapInfo;
7297 PCRTUINT128U pu128Src;
7298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7299 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7300 if (rc == VINF_SUCCESS)
7301 {
7302 pu128Dst->au64[0] = pu128Src->au64[0];
7303 pu128Dst->au64[1] = pu128Src->au64[1];
7304 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7305 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7306 }
7307 return rc;
7308}
7309
7310
7311#ifdef IEM_WITH_SETJMP
7312/**
7313 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7314 * related, longjmp on error.
7315 *
7316 * Raises \#GP(0) if not aligned.
7317 *
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param pu128Dst Where to return the dqword.
7320 * @param iSegReg The index of the segment register to use for
7321 * this access. The base and limits are checked.
7322 * @param GCPtrMem The address of the guest memory.
7323 */
7324void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7325 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7326{
7327 /* The lazy approach for now... */
7328 uint8_t bUnmapInfo;
7329 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7330 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7331 pu128Dst->au64[0] = pu128Src->au64[0];
7332 pu128Dst->au64[1] = pu128Src->au64[1];
7333 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7334 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7335}
7336#endif
7337
7338
7339/**
7340 * Fetches a data qqword (quad qword), generally AVX related.
7341 *
7342 * @returns Strict VBox status code.
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param pu256Dst Where to return the qqword.
7345 * @param iSegReg The index of the segment register to use for
7346 * this access. The base and limits are checked.
7347 * @param GCPtrMem The address of the guest memory.
7348 */
7349VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7350{
7351 /* The lazy approach for now... */
7352 uint8_t bUnmapInfo;
7353 PCRTUINT256U pu256Src;
7354 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7355 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7356 if (rc == VINF_SUCCESS)
7357 {
7358 pu256Dst->au64[0] = pu256Src->au64[0];
7359 pu256Dst->au64[1] = pu256Src->au64[1];
7360 pu256Dst->au64[2] = pu256Src->au64[2];
7361 pu256Dst->au64[3] = pu256Src->au64[3];
7362 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7363 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7364 }
7365 return rc;
7366}
7367
7368
7369#ifdef IEM_WITH_SETJMP
7370/**
7371 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7372 *
7373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7374 * @param pu256Dst Where to return the qqword.
7375 * @param iSegReg The index of the segment register to use for
7376 * this access. The base and limits are checked.
7377 * @param GCPtrMem The address of the guest memory.
7378 */
7379void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7380{
7381 /* The lazy approach for now... */
7382 uint8_t bUnmapInfo;
7383 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7384 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7385 pu256Dst->au64[0] = pu256Src->au64[0];
7386 pu256Dst->au64[1] = pu256Src->au64[1];
7387 pu256Dst->au64[2] = pu256Src->au64[2];
7388 pu256Dst->au64[3] = pu256Src->au64[3];
7389 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7390 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7391}
7392#endif
7393
7394
7395/**
7396 * Fetches a data oword (octo word) at an aligned address, generally AVX
7397 * related.
7398 *
7399 * Raises \#GP(0) if not aligned.
7400 *
7401 * @returns Strict VBox status code.
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 * @param pu256Dst Where to return the qqword.
7404 * @param iSegReg The index of the segment register to use for
7405 * this access. The base and limits are checked.
7406 * @param GCPtrMem The address of the guest memory.
7407 */
7408VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7409{
7410 /* The lazy approach for now... */
7411 uint8_t bUnmapInfo;
7412 PCRTUINT256U pu256Src;
7413 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7414 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7415 if (rc == VINF_SUCCESS)
7416 {
7417 pu256Dst->au64[0] = pu256Src->au64[0];
7418 pu256Dst->au64[1] = pu256Src->au64[1];
7419 pu256Dst->au64[2] = pu256Src->au64[2];
7420 pu256Dst->au64[3] = pu256Src->au64[3];
7421 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7422 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7423 }
7424 return rc;
7425}
7426
7427
7428#ifdef IEM_WITH_SETJMP
7429/**
7430 * Fetches a data oword (octo word) at an aligned address, generally AVX
7431 * related, longjmp on error.
7432 *
7433 * Raises \#GP(0) if not aligned.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 * @param pu256Dst Where to return the qqword.
7437 * @param iSegReg The index of the segment register to use for
7438 * this access. The base and limits are checked.
7439 * @param GCPtrMem The address of the guest memory.
7440 */
7441void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7442 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7443{
7444 /* The lazy approach for now... */
7445 uint8_t bUnmapInfo;
7446 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7447 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7448 pu256Dst->au64[0] = pu256Src->au64[0];
7449 pu256Dst->au64[1] = pu256Src->au64[1];
7450 pu256Dst->au64[2] = pu256Src->au64[2];
7451 pu256Dst->au64[3] = pu256Src->au64[3];
7452 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7453 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7454}
7455#endif
7456
7457
7458
7459/**
7460 * Fetches a descriptor register (lgdt, lidt).
7461 *
7462 * @returns Strict VBox status code.
7463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7464 * @param pcbLimit Where to return the limit.
7465 * @param pGCPtrBase Where to return the base.
7466 * @param iSegReg The index of the segment register to use for
7467 * this access. The base and limits are checked.
7468 * @param GCPtrMem The address of the guest memory.
7469 * @param enmOpSize The effective operand size.
7470 */
7471VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7472 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7473{
7474 /*
7475 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7476 * little special:
7477 * - The two reads are done separately.
7478 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7479 * - We suspect the 386 to actually commit the limit before the base in
7480 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7481 * don't try to emulate this eccentric behavior, because it's not well
7482 * enough understood and rather hard to trigger.
7483 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7484 */
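 /* Editor's illustrative note: the memory operand is the usual pseudo-descriptor,
    i.e. a 16-bit limit at offset 0 followed by the base at offset 2, which is why
    every path below reads the limit from GCPtrMem and the base from GCPtrMem + 2. */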
7485 VBOXSTRICTRC rcStrict;
7486 if (IEM_IS_64BIT_CODE(pVCpu))
7487 {
7488 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7489 if (rcStrict == VINF_SUCCESS)
7490 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7491 }
7492 else
7493 {
7494 uint32_t uTmp = 0; /* (Zeroed to quiet a possible Visual C++ 'used uninitialized' warning.) */
7495 if (enmOpSize == IEMMODE_32BIT)
7496 {
7497 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7498 {
7499 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7500 if (rcStrict == VINF_SUCCESS)
7501 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7502 }
7503 else
7504 {
7505 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7506 if (rcStrict == VINF_SUCCESS)
7507 {
7508 *pcbLimit = (uint16_t)uTmp;
7509 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7510 }
7511 }
7512 if (rcStrict == VINF_SUCCESS)
7513 *pGCPtrBase = uTmp;
7514 }
7515 else
7516 {
7517 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7518 if (rcStrict == VINF_SUCCESS)
7519 {
7520 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7521 if (rcStrict == VINF_SUCCESS)
7522 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7523 }
7524 }
7525 }
7526 return rcStrict;
7527}
7528
7529
7530/**
7531 * Stores a data dqword, SSE aligned.
7532 *
7533 * @returns Strict VBox status code.
7534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7535 * @param iSegReg The index of the segment register to use for
7536 * this access. The base and limits are checked.
7537 * @param GCPtrMem The address of the guest memory.
7538 * @param u128Value The value to store.
7539 */
7540VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7541{
7542 /* The lazy approach for now... */
7543 uint8_t bUnmapInfo;
7544 PRTUINT128U pu128Dst;
7545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7546 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7547 if (rc == VINF_SUCCESS)
7548 {
7549 pu128Dst->au64[0] = u128Value.au64[0];
7550 pu128Dst->au64[1] = u128Value.au64[1];
7551 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7552 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7553 }
7554 return rc;
7555}
7556
7557
7558#ifdef IEM_WITH_SETJMP
7559/**
7560 * Stores a data dqword, SSE aligned, longjmp on error.
7561 *
7563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7564 * @param iSegReg The index of the segment register to use for
7565 * this access. The base and limits are checked.
7566 * @param GCPtrMem The address of the guest memory.
7567 * @param u128Value The value to store.
7568 */
7569void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7570 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7571{
7572 /* The lazy approach for now... */
7573 uint8_t bUnmapInfo;
7574 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7575 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7576 pu128Dst->au64[0] = u128Value.au64[0];
7577 pu128Dst->au64[1] = u128Value.au64[1];
7578 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7579 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7580}
7581#endif
7582
7583
7584/**
7585 * Stores a data qqword.
7586 *
7587 * @returns Strict VBox status code.
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param iSegReg The index of the segment register to use for
7590 * this access. The base and limits are checked.
7591 * @param GCPtrMem The address of the guest memory.
7592 * @param pu256Value Pointer to the value to store.
7593 */
7594VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7595{
7596 /* The lazy approach for now... */
7597 uint8_t bUnmapInfo;
7598 PRTUINT256U pu256Dst;
7599 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7600 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7601 if (rc == VINF_SUCCESS)
7602 {
7603 pu256Dst->au64[0] = pu256Value->au64[0];
7604 pu256Dst->au64[1] = pu256Value->au64[1];
7605 pu256Dst->au64[2] = pu256Value->au64[2];
7606 pu256Dst->au64[3] = pu256Value->au64[3];
7607 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7608 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7609 }
7610 return rc;
7611}
7612
7613
7614#ifdef IEM_WITH_SETJMP
7615/**
7616 * Stores a data qqword, longjmp on error.
7617 *
7618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7619 * @param iSegReg The index of the segment register to use for
7620 * this access. The base and limits are checked.
7621 * @param GCPtrMem The address of the guest memory.
7622 * @param pu256Value Pointer to the value to store.
7623 */
7624void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7625{
7626 /* The lazy approach for now... */
7627 uint8_t bUnmapInfo;
7628 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7629 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7630 pu256Dst->au64[0] = pu256Value->au64[0];
7631 pu256Dst->au64[1] = pu256Value->au64[1];
7632 pu256Dst->au64[2] = pu256Value->au64[2];
7633 pu256Dst->au64[3] = pu256Value->au64[3];
7634 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7635 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7636}
7637#endif
7638
7639
7640/**
7641 * Stores a data qqword.
7642 *
7643 * @returns Strict VBox status code.
7644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7645 * @param iSegReg The index of the segment register to use for
7646 * this access. The base and limits are checked.
7647 * @param GCPtrMem The address of the guest memory.
7648 * @param pu256Value Pointer to the value to store.
7649 */
7650VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7651{
7652 /* The lazy approach for now... */
7653 uint8_t bUnmapInfo;
7654 PRTUINT256U pu256Dst;
7655 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7656 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7657 if (rc == VINF_SUCCESS)
7658 {
7659 pu256Dst->au64[0] = pu256Value->au64[0];
7660 pu256Dst->au64[1] = pu256Value->au64[1];
7661 pu256Dst->au64[2] = pu256Value->au64[2];
7662 pu256Dst->au64[3] = pu256Value->au64[3];
7663 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7664 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7665 }
7666 return rc;
7667}
7668
7669
7670#ifdef IEM_WITH_SETJMP
7671/**
7672 * Stores a data qqword, longjmp on error.
7673 *
7674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7675 * @param iSegReg The index of the segment register to use for
7676 * this access. The base and limits are checked.
7677 * @param GCPtrMem The address of the guest memory.
7678 * @param pu256Value Pointer to the value to store.
7679 */
7680void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7681{
7682 /* The lazy approach for now... */
7683 uint8_t bUnmapInfo;
7684 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7685 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7686 pu256Dst->au64[0] = pu256Value->au64[0];
7687 pu256Dst->au64[1] = pu256Value->au64[1];
7688 pu256Dst->au64[2] = pu256Value->au64[2];
7689 pu256Dst->au64[3] = pu256Value->au64[3];
7690 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7691 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7692}
7693#endif
7694
7695
7696/**
7697 * Stores a data qqword, AVX \#GP(0) aligned.
7698 *
7699 * @returns Strict VBox status code.
7700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7701 * @param iSegReg The index of the segment register to use for
7702 * this access. The base and limits are checked.
7703 * @param GCPtrMem The address of the guest memory.
7704 * @param pu256Value Pointer to the value to store.
7705 */
7706VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7707{
7708 /* The lazy approach for now... */
7709 uint8_t bUnmapInfo;
7710 PRTUINT256U pu256Dst;
7711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7712 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7713 if (rc == VINF_SUCCESS)
7714 {
7715 pu256Dst->au64[0] = pu256Value->au64[0];
7716 pu256Dst->au64[1] = pu256Value->au64[1];
7717 pu256Dst->au64[2] = pu256Value->au64[2];
7718 pu256Dst->au64[3] = pu256Value->au64[3];
7719 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7720 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7721 }
7722 return rc;
7723}
7724
7725
7726#ifdef IEM_WITH_SETJMP
7727/**
7728 * Stores a data qqword, AVX aligned, longjmp on error.
7729 *
7731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7732 * @param iSegReg The index of the segment register to use for
7733 * this access. The base and limits are checked.
7734 * @param GCPtrMem The address of the guest memory.
7735 * @param pu256Value Pointer to the value to store.
7736 */
7737void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7738 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7739{
7740 /* The lazy approach for now... */
7741 uint8_t bUnmapInfo;
7742 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7743 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7744 pu256Dst->au64[0] = pu256Value->au64[0];
7745 pu256Dst->au64[1] = pu256Value->au64[1];
7746 pu256Dst->au64[2] = pu256Value->au64[2];
7747 pu256Dst->au64[3] = pu256Value->au64[3];
7748 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7749 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7750}
7751#endif
7752
7753
7754/**
7755 * Stores a descriptor register (sgdt, sidt).
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7759 * @param cbLimit The limit.
7760 * @param GCPtrBase The base address.
7761 * @param iSegReg The index of the segment register to use for
7762 * this access. The base and limits are checked.
7763 * @param GCPtrMem The address of the guest memory.
7764 */
7765VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7766{
7767 /*
7768 * The SIDT and SGDT instructions actually store the data using two
7769 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7770 * do not respond to opsize prefixes.
7771 */
7772 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7773 if (rcStrict == VINF_SUCCESS)
7774 {
7775 if (IEM_IS_16BIT_CODE(pVCpu))
7776 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7777 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7778 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7779 else if (IEM_IS_32BIT_CODE(pVCpu))
7780 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7781 else
7782 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7783 }
7784 return rcStrict;
7785}
7786
7787
7788/**
7789 * Begin a special stack push (used by interrupt, exceptions and such).
7790 *
7791 * This will raise \#SS or \#PF if appropriate.
7792 *
7793 * @returns Strict VBox status code.
7794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7795 * @param cbMem The number of bytes to push onto the stack.
7796 * @param cbAlign The alignment mask (7, 3, 1).
7797 * @param ppvMem Where to return the pointer to the stack memory.
7798 * As with the other memory functions this could be
7799 * direct access or bounce buffered access, so
7800 * don't commit the register values until the commit call
7801 * succeeds.
7802 * @param pbUnmapInfo Where to store unmap info for
7803 * iemMemStackPushCommitSpecial.
7804 * @param puNewRsp Where to return the new RSP value. This must be
7805 * passed unchanged to
7806 * iemMemStackPushCommitSpecial().
7807 */
7808VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7809 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7810{
7811 Assert(cbMem < UINT8_MAX);
7812 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7813 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7814}
7815
7816
7817/**
7818 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7819 *
7820 * This will update the rSP.
7821 *
7822 * @returns Strict VBox status code.
7823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7824 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7825 * @param uNewRsp The new RSP value returned by
7826 * iemMemStackPushBeginSpecial().
7827 */
7828VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7829{
7830 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7831 if (rcStrict == VINF_SUCCESS)
7832 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7833 return rcStrict;
7834}
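/*
 * Editor's usage sketch (illustrative only, not called anywhere; uValueToPush is
 * a placeholder and error handling is simplified): pushing 8 bytes the special way.
 *
 *      void *pvStack;
 *      uint8_t bUnmapInfo;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvStack, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvStack = uValueToPush;
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 *      }
 *
 * The commit call both unmaps the buffer and updates RSP, as documented above.
 */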
7835
7836
7837/**
7838 * Begin a special stack pop (used by iret, retf and such).
7839 *
7840 * This will raise \#SS or \#PF if appropriate.
7841 *
7842 * @returns Strict VBox status code.
7843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7844 * @param cbMem The number of bytes to pop from the stack.
7845 * @param cbAlign The alignment mask (7, 3, 1).
7846 * @param ppvMem Where to return the pointer to the stack memory.
7847 * @param pbUnmapInfo Where to store unmap info for
7848 * iemMemStackPopDoneSpecial.
7849 * @param puNewRsp Where to return the new RSP value. This must be
7850 * assigned to CPUMCTX::rsp manually some time
7851 * after iemMemStackPopDoneSpecial() has been
7852 * called.
7853 */
7854VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7855 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7856{
7857 Assert(cbMem < UINT8_MAX);
7858 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7859 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7860}
7861
7862
7863/**
7864 * Continue a special stack pop (used by iret and retf), for the purpose of
7865 * retrieving a new stack pointer.
7866 *
7867 * This will raise \#SS or \#PF if appropriate.
7868 *
7869 * @returns Strict VBox status code.
7870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7871 * @param off Offset from the top of the stack. This is zero
7872 * except in the retf case.
7873 * @param cbMem The number of bytes to pop from the stack.
7874 * @param ppvMem Where to return the pointer to the stack memory.
7875 * @param pbUnmapInfo Where to store unmap info for
7876 * iemMemStackPopDoneSpecial.
7877 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7878 * return this because all use of this function is
7879 * to retrieve a new value and anything we return
7880 * here would be discarded.)
7881 */
7882VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7883 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7884{
7885 Assert(cbMem < UINT8_MAX);
7886
7887 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7888 RTGCPTR GCPtrTop;
7889 if (IEM_IS_64BIT_CODE(pVCpu))
7890 GCPtrTop = uCurNewRsp;
7891 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7892 GCPtrTop = (uint32_t)uCurNewRsp;
7893 else
7894 GCPtrTop = (uint16_t)uCurNewRsp;
7895
7896 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7897 0 /* checked in iemMemStackPopBeginSpecial */);
7898}
7899
7900
7901/**
7902 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7903 * iemMemStackPopContinueSpecial).
7904 *
7905 * The caller will manually commit the rSP.
7906 *
7907 * @returns Strict VBox status code.
7908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7909 * @param bUnmapInfo Unmap information returned by
7910 * iemMemStackPopBeginSpecial() or
7911 * iemMemStackPopContinueSpecial().
7912 */
7913VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7914{
7915 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7916}
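/*
 * Editor's usage sketch (illustrative only, error handling simplified): a simple
 * special pop reads the mapped bytes, releases the mapping, and only then commits
 * the new RSP by hand, as the doxygen above requires.
 *
 *      void const *pvStack;
 *      uint8_t bUnmapInfo;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvStack, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uPopped = *(uint64_t const *)pvStack;   (use as needed)
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *      }
 */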
7917
7918
7919/**
7920 * Fetches a system table byte.
7921 *
7922 * @returns Strict VBox status code.
7923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7924 * @param pbDst Where to return the byte.
7925 * @param iSegReg The index of the segment register to use for
7926 * this access. The base and limits are checked.
7927 * @param GCPtrMem The address of the guest memory.
7928 */
7929VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7930{
7931 /* The lazy approach for now... */
7932 uint8_t bUnmapInfo;
7933 uint8_t const *pbSrc;
7934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7935 if (rc == VINF_SUCCESS)
7936 {
7937 *pbDst = *pbSrc;
7938 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7939 }
7940 return rc;
7941}
7942
7943
7944/**
7945 * Fetches a system table word.
7946 *
7947 * @returns Strict VBox status code.
7948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7949 * @param pu16Dst Where to return the word.
7950 * @param iSegReg The index of the segment register to use for
7951 * this access. The base and limits are checked.
7952 * @param GCPtrMem The address of the guest memory.
7953 */
7954VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7955{
7956 /* The lazy approach for now... */
7957 uint8_t bUnmapInfo;
7958 uint16_t const *pu16Src;
7959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7960 if (rc == VINF_SUCCESS)
7961 {
7962 *pu16Dst = *pu16Src;
7963 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7964 }
7965 return rc;
7966}
7967
7968
7969/**
7970 * Fetches a system table dword.
7971 *
7972 * @returns Strict VBox status code.
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param pu32Dst Where to return the dword.
7975 * @param iSegReg The index of the segment register to use for
7976 * this access. The base and limits are checked.
7977 * @param GCPtrMem The address of the guest memory.
7978 */
7979VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7980{
7981 /* The lazy approach for now... */
7982 uint8_t bUnmapInfo;
7983 uint32_t const *pu32Src;
7984 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7985 if (rc == VINF_SUCCESS)
7986 {
7987 *pu32Dst = *pu32Src;
7988 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7989 }
7990 return rc;
7991}
7992
7993
7994/**
7995 * Fetches a system table qword.
7996 *
7997 * @returns Strict VBox status code.
7998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7999 * @param pu64Dst Where to return the qword.
8000 * @param iSegReg The index of the segment register to use for
8001 * this access. The base and limits are checked.
8002 * @param GCPtrMem The address of the guest memory.
8003 */
8004VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8005{
8006 /* The lazy approach for now... */
8007 uint8_t bUnmapInfo;
8008 uint64_t const *pu64Src;
8009 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8010 if (rc == VINF_SUCCESS)
8011 {
8012 *pu64Dst = *pu64Src;
8013 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8014 }
8015 return rc;
8016}
8017
8018
8019/**
8020 * Fetches a descriptor table entry with caller specified error code.
8021 *
8022 * @returns Strict VBox status code.
8023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8024 * @param pDesc Where to return the descriptor table entry.
8025 * @param uSel The selector whose table entry to fetch.
8026 * @param uXcpt The exception to raise on table lookup error.
8027 * @param uErrorCode The error code associated with the exception.
8028 */
8029static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8030 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8031{
8032 AssertPtr(pDesc);
8033 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8034
8035 /** @todo did the 286 require all 8 bytes to be accessible? */
8036 /*
8037 * Get the selector table base and check bounds.
8038 */
8039 RTGCPTR GCPtrBase;
8040 if (uSel & X86_SEL_LDT)
8041 {
8042 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8043 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8044 {
8045 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8046 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8047 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8048 uErrorCode, 0);
8049 }
8050
8051 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8052 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8053 }
8054 else
8055 {
8056 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8057 {
8058 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8059 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8060 uErrorCode, 0);
8061 }
8062 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8063 }
8064
8065 /*
8066 * Read the legacy descriptor and maybe the long mode extensions if
8067 * required.
8068 */
8069 VBOXSTRICTRC rcStrict;
8070 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8071 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8072 else
8073 {
8074 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8075 if (rcStrict == VINF_SUCCESS)
8076 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8077 if (rcStrict == VINF_SUCCESS)
8078 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8079 if (rcStrict == VINF_SUCCESS)
8080 pDesc->Legacy.au16[3] = 0;
8081 else
8082 return rcStrict;
8083 }
8084
8085 if (rcStrict == VINF_SUCCESS)
8086 {
8087 if ( !IEM_IS_LONG_MODE(pVCpu)
8088 || pDesc->Legacy.Gen.u1DescType)
8089 pDesc->Long.au64[1] = 0;
8090 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8091 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
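 /* Editor's note: (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8,
    i.e. the offset of the upper half of the 16-byte long mode system descriptor. */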
8092 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8093 else
8094 {
8095 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8096 /** @todo is this the right exception? */
8097 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8098 }
8099 }
8100 return rcStrict;
8101}
8102
8103
8104/**
8105 * Fetches a descriptor table entry.
8106 *
8107 * @returns Strict VBox status code.
8108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8109 * @param pDesc Where to return the descriptor table entry.
8110 * @param uSel The selector whose table entry to fetch.
8111 * @param uXcpt The exception to raise on table lookup error.
8112 */
8113VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8114{
8115 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8116}
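/*
 * Editor's usage sketch (illustrative only): fetching the descriptor for a
 * selector and raising #GP (error code = selector sans RPL) on lookup failure.
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... examine Desc.Legacy.Gen.u1DescType, Desc.Legacy.Gen.u2Dpl, etc. ...
 */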
8117
8118
8119/**
8120 * Marks the selector descriptor as accessed (only non-system descriptors).
8121 *
8122 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8123 * will therefore skip the limit checks.
8124 *
8125 * @returns Strict VBox status code.
8126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8127 * @param uSel The selector.
8128 */
8129VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8130{
8131 /*
8132 * Get the selector table base and calculate the entry address.
8133 */
8134 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8135 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8136 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8137 GCPtr += uSel & X86_SEL_MASK;
8138
8139 /*
8140 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8141 * ugly stuff to avoid this. This makes sure the access is atomic and more
8142 * or less removes any question about 8-bit vs 32-bit accesses.
8143 */
8144 VBOXSTRICTRC rcStrict;
8145 uint8_t bUnmapInfo;
8146 uint32_t volatile *pu32;
8147 if ((GCPtr & 3) == 0)
8148 {
8149 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8150 GCPtr += 2 + 2;
8151 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8152 if (rcStrict != VINF_SUCCESS)
8153 return rcStrict;
8154 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8155 }
8156 else
8157 {
8158 /* The misaligned GDT/LDT case, map the whole thing. */
8159 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8160 if (rcStrict != VINF_SUCCESS)
8161 return rcStrict;
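 /* Bit 40 of the 8-byte descriptor is the accessed bit. When the mapping is
    misaligned, the cases below step the pointer up to the next 32-bit boundary
    (+3/+2/+1 bytes) and shrink the bit index by the bits skipped (24/16/8) so
    the very same bit is still set atomically. */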
8162 switch ((uintptr_t)pu32 & 3)
8163 {
8164 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8165 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8166 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8167 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8168 }
8169 }
8170
8171 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8172}
8173
8174
8175#undef LOG_GROUP
8176#define LOG_GROUP LOG_GROUP_IEM
8177
8178/** @} */
8179
8180/** @name Opcode Helpers.
8181 * @{
8182 */
8183
8184/**
8185 * Calculates the effective address of a ModR/M memory operand.
8186 *
8187 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8188 *
8189 * @return Strict VBox status code.
8190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8191 * @param bRm The ModRM byte.
8192 * @param cbImmAndRspOffset - First byte: The size of any immediate
8193 * following the effective address opcode bytes
8194 * (only for RIP relative addressing).
8195 * - Second byte: RSP displacement (for POP [ESP]).
8196 * @param pGCPtrEff Where to return the effective address.
8197 */
8198VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8199{
8200 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8201# define SET_SS_DEF() \
8202 do \
8203 { \
8204 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8205 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8206 } while (0)
8207
8208 if (!IEM_IS_64BIT_CODE(pVCpu))
8209 {
8210/** @todo Check the effective address size crap! */
8211 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8212 {
8213 uint16_t u16EffAddr;
8214
8215 /* Handle the disp16 form with no registers first. */
8216 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8217 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8218 else
8219 {
8220 /* Get the displacement. */
8221 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8222 {
8223 case 0: u16EffAddr = 0; break;
8224 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8225 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8226 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8227 }
8228
8229 /* Add the base and index registers to the disp. */
8230 switch (bRm & X86_MODRM_RM_MASK)
8231 {
8232 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8233 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8234 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8235 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8236 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8237 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8238 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8239 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8240 }
8241 }
8242
8243 *pGCPtrEff = u16EffAddr;
8244 }
8245 else
8246 {
8247 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8248 uint32_t u32EffAddr;
8249
8250 /* Handle the disp32 form with no registers first. */
8251 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8252 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8253 else
8254 {
8255 /* Get the register (or SIB) value. */
8256 switch ((bRm & X86_MODRM_RM_MASK))
8257 {
8258 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8259 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8260 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8261 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8262 case 4: /* SIB */
8263 {
8264 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8265
8266 /* Get the index and scale it. */
8267 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8268 {
8269 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8270 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8271 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8272 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8273 case 4: u32EffAddr = 0; /*none */ break;
8274 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8275 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8276 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8277 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8278 }
8279 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8280
8281 /* add base */
8282 switch (bSib & X86_SIB_BASE_MASK)
8283 {
8284 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8285 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8286 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8287 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8288 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8289 case 5:
8290 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8291 {
8292 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8293 SET_SS_DEF();
8294 }
8295 else
8296 {
8297 uint32_t u32Disp;
8298 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8299 u32EffAddr += u32Disp;
8300 }
8301 break;
8302 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8303 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8304 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8305 }
8306 break;
8307 }
8308 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8309 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8310 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8311 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8312 }
8313
8314 /* Get and add the displacement. */
8315 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8316 {
8317 case 0:
8318 break;
8319 case 1:
8320 {
8321 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8322 u32EffAddr += i8Disp;
8323 break;
8324 }
8325 case 2:
8326 {
8327 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8328 u32EffAddr += u32Disp;
8329 break;
8330 }
8331 default:
8332 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8333 }
8334
8335 }
8336 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8337 *pGCPtrEff = u32EffAddr;
8338 }
8339 }
8340 else
8341 {
8342 uint64_t u64EffAddr;
8343
8344 /* Handle the rip+disp32 form with no registers first. */
8345 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8346 {
8347 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8348 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8349 }
8350 else
8351 {
8352 /* Get the register (or SIB) value. */
8353 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8354 {
8355 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8356 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8357 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8358 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8359 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8360 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8361 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8362 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8363 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8364 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8365 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8366 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8367 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8368 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8369 /* SIB */
8370 case 4:
8371 case 12:
8372 {
8373 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8374
8375 /* Get the index and scale it. */
8376 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8377 {
8378 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8379 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8380 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8381 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8382 case 4: u64EffAddr = 0; /*none */ break;
8383 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8384 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8385 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8386 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8387 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8388 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8389 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8390 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8391 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8392 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8393 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8394 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8395 }
8396 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8397
8398 /* add base */
8399 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8400 {
8401 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8402 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8403 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8404 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8405 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8406 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8407 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8408 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8409 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8410 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8411 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8412 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8413 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8414 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8415 /* complicated encodings */
8416 case 5:
8417 case 13:
8418 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8419 {
8420 if (!pVCpu->iem.s.uRexB)
8421 {
8422 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8423 SET_SS_DEF();
8424 }
8425 else
8426 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8427 }
8428 else
8429 {
8430 uint32_t u32Disp;
8431 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8432 u64EffAddr += (int32_t)u32Disp;
8433 }
8434 break;
8435 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8436 }
8437 break;
8438 }
8439 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8440 }
8441
8442 /* Get and add the displacement. */
8443 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8444 {
8445 case 0:
8446 break;
8447 case 1:
8448 {
8449 int8_t i8Disp;
8450 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8451 u64EffAddr += i8Disp;
8452 break;
8453 }
8454 case 2:
8455 {
8456 uint32_t u32Disp;
8457 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8458 u64EffAddr += (int32_t)u32Disp;
8459 break;
8460 }
8461 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8462 }
8463
8464 }
8465
8466 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8467 *pGCPtrEff = u64EffAddr;
8468 else
8469 {
8470 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8471 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8472 }
8473 }
8474
8475 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8476 return VINF_SUCCESS;
8477}
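/*
 * Editor's worked example (illustrative only): in 32-bit code, bRm=0x84 decodes
 * as mod=2, rm=4, so a SIB byte and a disp32 follow. With SIB=0x88 (scale=2,
 * index=1/ECX, base=0/EAX) the code above computes
 *
 *      GCPtrEff = EAX + ECX * 4 + disp32
 *
 * with DS as the default segment, since SET_SS_DEF() only fires for the
 * EBP/ESP style bases.
 */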
8478
8479
8480#ifdef IEM_WITH_SETJMP
8481/**
8482 * Calculates the effective address of a ModR/M memory operand.
8483 *
8484 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8485 *
8486 * May longjmp on internal error.
8487 *
8488 * @return The effective address.
8489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8490 * @param bRm The ModRM byte.
8491 * @param cbImmAndRspOffset - First byte: The size of any immediate
8492 * following the effective address opcode bytes
8493 * (only for RIP relative addressing).
8494 * - Second byte: RSP displacement (for POP [ESP]).
8495 */
8496RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8497{
8498 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8499# define SET_SS_DEF() \
8500 do \
8501 { \
8502 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8503 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8504 } while (0)
8505
8506 if (!IEM_IS_64BIT_CODE(pVCpu))
8507 {
8508/** @todo Check the effective address size crap! */
8509 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8510 {
8511 uint16_t u16EffAddr;
8512
8513 /* Handle the disp16 form with no registers first. */
8514 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8515 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8516 else
8517 {
8518 /* Get the displacement. */
8519 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8520 {
8521 case 0: u16EffAddr = 0; break;
8522 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8523 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8524 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8525 }
8526
8527 /* Add the base and index registers to the disp. */
8528 switch (bRm & X86_MODRM_RM_MASK)
8529 {
8530 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8531 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8532 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8533 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8534 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8535 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8536 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8537 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8538 }
8539 }
8540
8541 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8542 return u16EffAddr;
8543 }
8544
8545 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8546 uint32_t u32EffAddr;
8547
8548 /* Handle the disp32 form with no registers first. */
8549 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8550 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8551 else
8552 {
8553 /* Get the register (or SIB) value. */
8554 switch ((bRm & X86_MODRM_RM_MASK))
8555 {
8556 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8557 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8558 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8559 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8560 case 4: /* SIB */
8561 {
8562 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8563
8564 /* Get the index and scale it. */
8565 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8566 {
8567 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8568 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8569 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8570 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8571 case 4: u32EffAddr = 0; /*none */ break;
8572 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8573 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8574 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8575 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8576 }
8577 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8578
8579 /* add base */
8580 switch (bSib & X86_SIB_BASE_MASK)
8581 {
8582 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8583 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8584 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8585 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8586 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8587 case 5:
8588 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8589 {
8590 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8591 SET_SS_DEF();
8592 }
8593 else
8594 {
8595 uint32_t u32Disp;
8596 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8597 u32EffAddr += u32Disp;
8598 }
8599 break;
8600 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8601 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8602 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8603 }
8604 break;
8605 }
8606 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8607 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8608 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8609 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8610 }
8611
8612 /* Get and add the displacement. */
8613 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8614 {
8615 case 0:
8616 break;
8617 case 1:
8618 {
8619 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8620 u32EffAddr += i8Disp;
8621 break;
8622 }
8623 case 2:
8624 {
8625 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8626 u32EffAddr += u32Disp;
8627 break;
8628 }
8629 default:
8630 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8631 }
8632 }
8633
8634 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8635 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8636 return u32EffAddr;
8637 }
8638
8639 uint64_t u64EffAddr;
8640
8641 /* Handle the rip+disp32 form with no registers first. */
8642 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8643 {
8644 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8645 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8646 }
8647 else
8648 {
8649 /* Get the register (or SIB) value. */
8650 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8651 {
8652 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8653 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8654 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8655 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8656 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8657 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8658 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8659 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8660 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8661 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8662 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8663 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8664 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8665 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8666 /* SIB */
8667 case 4:
8668 case 12:
8669 {
8670 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8671
8672 /* Get the index and scale it. */
8673 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8674 {
8675 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8676 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8677 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8678 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8679 case 4: u64EffAddr = 0; /*none */ break;
8680 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8681 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8682 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8683 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8684 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8685 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8686 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8687 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8688 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8689 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8690 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8691 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8692 }
8693 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8694
8695 /* add base */
8696 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8697 {
8698 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8699 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8700 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8701 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8702 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8703 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8704 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8705 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8706 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8707 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8708 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8709 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8710 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8711 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8712 /* complicated encodings */
8713 case 5:
8714 case 13:
8715 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8716 {
8717 if (!pVCpu->iem.s.uRexB)
8718 {
8719 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8720 SET_SS_DEF();
8721 }
8722 else
8723 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8724 }
8725 else
8726 {
8727 uint32_t u32Disp;
8728 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8729 u64EffAddr += (int32_t)u32Disp;
8730 }
8731 break;
8732 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8733 }
8734 break;
8735 }
8736 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8737 }
8738
8739 /* Get and add the displacement. */
8740 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8741 {
8742 case 0:
8743 break;
8744 case 1:
8745 {
8746 int8_t i8Disp;
8747 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8748 u64EffAddr += i8Disp;
8749 break;
8750 }
8751 case 2:
8752 {
8753 uint32_t u32Disp;
8754 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8755 u64EffAddr += (int32_t)u32Disp;
8756 break;
8757 }
8758 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8759 }
8760
8761 }
8762
8763 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8764 {
8765 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8766 return u64EffAddr;
8767 }
8768 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8769 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8770 return u64EffAddr & UINT32_MAX;
8771}
8772#endif /* IEM_WITH_SETJMP */
8773
8774
8775/**
8776 * Calculates the effective address of a ModR/M memory operand, extended version
8777 * for use in the recompilers.
8778 *
8779 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8780 *
8781 * @return Strict VBox status code.
8782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8783 * @param bRm The ModRM byte.
8784 * @param cbImmAndRspOffset - First byte: The size of any immediate
8785 * following the effective address opcode bytes
8786 * (only for RIP relative addressing).
8787 * - Second byte: RSP displacement (for POP [ESP]).
8788 * @param pGCPtrEff Where to return the effective address.
8789 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8790 * SIB byte (bits 39:32).
8791 */
8792VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8793{
8794 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8795# define SET_SS_DEF() \
8796 do \
8797 { \
8798 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8799 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8800 } while (0)
8801
8802 uint64_t uInfo;
8803 if (!IEM_IS_64BIT_CODE(pVCpu))
8804 {
8805/** @todo Check the effective address size crap! */
8806 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8807 {
8808 uint16_t u16EffAddr;
8809
8810 /* Handle the disp16 form with no registers first. */
8811 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8812 {
8813 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8814 uInfo = u16EffAddr;
8815 }
8816 else
8817 {
8818 /* Get the displacement. */
8819 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8820 {
8821 case 0: u16EffAddr = 0; break;
8822 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8823 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8824 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8825 }
8826 uInfo = u16EffAddr;
8827
8828 /* Add the base and index registers to the disp. */
8829 switch (bRm & X86_MODRM_RM_MASK)
8830 {
8831 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8832 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8833 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8834 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8835 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8836 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8837 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8838 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8839 }
8840 }
8841
8842 *pGCPtrEff = u16EffAddr;
8843 }
8844 else
8845 {
8846 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8847 uint32_t u32EffAddr;
8848
8849 /* Handle the disp32 form with no registers first. */
8850 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8851 {
8852 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8853 uInfo = u32EffAddr;
8854 }
8855 else
8856 {
8857 /* Get the register (or SIB) value. */
8858 uInfo = 0;
8859 switch ((bRm & X86_MODRM_RM_MASK))
8860 {
8861 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8862 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8863 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8864 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8865 case 4: /* SIB */
8866 {
8867 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8868 uInfo = (uint64_t)bSib << 32;
8869
8870 /* Get the index and scale it. */
8871 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8872 {
8873 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8874 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8875 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8876 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8877 case 4: u32EffAddr = 0; /*none */ break;
8878 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8879 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8880 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8881 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8882 }
8883 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8884
8885 /* add base */
8886 switch (bSib & X86_SIB_BASE_MASK)
8887 {
8888 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8889 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8890 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8891 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8892 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8893 case 5:
8894 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8895 {
8896 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8897 SET_SS_DEF();
8898 }
8899 else
8900 {
8901 uint32_t u32Disp;
8902 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8903 u32EffAddr += u32Disp;
8904 uInfo |= u32Disp;
8905 }
8906 break;
8907 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8908 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8909 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8910 }
8911 break;
8912 }
8913 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8914 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8915 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8916 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8917 }
8918
8919 /* Get and add the displacement. */
8920 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8921 {
8922 case 0:
8923 break;
8924 case 1:
8925 {
8926 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8927 u32EffAddr += i8Disp;
8928 uInfo |= (uint32_t)(int32_t)i8Disp;
8929 break;
8930 }
8931 case 2:
8932 {
8933 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8934 u32EffAddr += u32Disp;
8935 uInfo |= (uint32_t)u32Disp;
8936 break;
8937 }
8938 default:
8939 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8940 }
8941
8942 }
8943 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8944 *pGCPtrEff = u32EffAddr;
8945 }
8946 }
8947 else
8948 {
8949 uint64_t u64EffAddr;
8950
8951 /* Handle the rip+disp32 form with no registers first. */
8952 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8953 {
8954 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8955 uInfo = (uint32_t)u64EffAddr;
8956 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8957 }
8958 else
8959 {
8960 /* Get the register (or SIB) value. */
8961 uInfo = 0;
8962 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8963 {
8964 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8965 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8966 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8967 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8968 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8969 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8970 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8971 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8972 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8973 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8974 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8975 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8976 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8977 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8978 /* SIB */
8979 case 4:
8980 case 12:
8981 {
8982 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8983 uInfo = (uint64_t)bSib << 32;
8984
8985 /* Get the index and scale it. */
8986 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8987 {
8988 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8989 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8990 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8991 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8992 case 4: u64EffAddr = 0; /*none */ break;
8993 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8994 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8995 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8996 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8997 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8998 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8999 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9000 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9001 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9002 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9003 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9004 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9005 }
9006 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9007
9008 /* add base */
9009 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9010 {
9011 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9012 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9013 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9014 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9015 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9016 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9017 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9018 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9019 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9020 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9021 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9022 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9023 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9024 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9025 /* complicated encodings */
9026 case 5:
9027 case 13:
9028 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9029 {
9030 if (!pVCpu->iem.s.uRexB)
9031 {
9032 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9033 SET_SS_DEF();
9034 }
9035 else
9036 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9037 }
9038 else
9039 {
9040 uint32_t u32Disp;
9041 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9042 u64EffAddr += (int32_t)u32Disp;
9043 uInfo |= u32Disp;
9044 }
9045 break;
9046 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9047 }
9048 break;
9049 }
9050 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9051 }
9052
9053 /* Get and add the displacement. */
9054 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9055 {
9056 case 0:
9057 break;
9058 case 1:
9059 {
9060 int8_t i8Disp;
9061 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9062 u64EffAddr += i8Disp;
9063 uInfo |= (uint32_t)(int32_t)i8Disp;
9064 break;
9065 }
9066 case 2:
9067 {
9068 uint32_t u32Disp;
9069 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9070 u64EffAddr += (int32_t)u32Disp;
9071 uInfo |= u32Disp;
9072 break;
9073 }
9074 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9075 }
9076
9077 }
9078
9079 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9080 *pGCPtrEff = u64EffAddr;
9081 else
9082 {
9083 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9084 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9085 }
9086 }
9087 *puInfo = uInfo;
9088
9089 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9090 return VINF_SUCCESS;
9091}
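/*
 * Illustrative sketch (comment only, not built): how a hypothetical caller of
 * iemOpHlpCalcRmEffAddrEx() above could split the extra info returned via
 * puInfo into the 32-bit displacement and the raw SIB byte, per the parameter
 * description.  The surrounding variables (pVCpu, bRm) are assumed to come
 * from the caller's decoder context.
 *
 *      RTGCPTR      GCPtrEff;
 *      uint64_t     uInfo;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0 /*cbImmAndRspOffset*/, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;         // bits 31:0
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);  // bits 39:32
 *          Log5(("EffAddr=%RGv disp=%#x sib=%#x\n", GCPtrEff, u32Disp, bSib));
 *      }
 */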
9092
9093/** @} */
9094
9095
9096#ifdef LOG_ENABLED
9097/**
9098 * Logs the current instruction.
9099 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9100 * @param fSameCtx Set if we have the same context information as the VMM,
9101 * clear if we may have already executed an instruction in
9102 * our debug context. When clear, we assume IEMCPU holds
9103 * valid CPU mode info.
9104 *
9105 * The @a fSameCtx parameter is now misleading and obsolete.
9106 * @param pszFunction The IEM function doing the execution.
9107 */
9108static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9109{
9110# ifdef IN_RING3
9111 if (LogIs2Enabled())
9112 {
9113 char szInstr[256];
9114 uint32_t cbInstr = 0;
9115 if (fSameCtx)
9116 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9117 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9118 szInstr, sizeof(szInstr), &cbInstr);
9119 else
9120 {
9121 uint32_t fFlags = 0;
9122 switch (IEM_GET_CPU_MODE(pVCpu))
9123 {
9124 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9125 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9126 case IEMMODE_16BIT:
9127 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9128 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9129 else
9130 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9131 break;
9132 }
9133 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9134 szInstr, sizeof(szInstr), &cbInstr);
9135 }
9136
9137 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9138 Log2(("**** %s fExec=%x\n"
9139 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9140 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9141 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9142 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9143 " %s\n"
9144 , pszFunction, pVCpu->iem.s.fExec,
9145 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9146 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9147 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9148 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9149 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9150 szInstr));
9151
9152 /* This stuff sucks atm. as it fills the log with MSRs. */
9153 //if (LogIs3Enabled())
9154 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9155 }
9156 else
9157# endif
9158 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9159 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9160 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9161}
9162#endif /* LOG_ENABLED */
9163
9164
9165#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9166/**
9167 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9168 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9169 *
9170 * @returns Modified rcStrict.
9171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9172 * @param rcStrict The instruction execution status.
9173 */
9174static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9175{
9176 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9177 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9178 {
9179 /* VMX preemption timer takes priority over NMI-window exits. */
9180 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9181 {
9182 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9183 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9184 }
9185 /*
9186 * Check remaining intercepts.
9187 *
9188 * NMI-window and Interrupt-window VM-exits.
9189 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9190 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9191 *
9192 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9193 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9194 */
9195 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9196 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9197 && !TRPMHasTrap(pVCpu))
9198 {
9199 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9200 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9201 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9202 {
9203 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9204 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9205 }
9206 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9207 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9208 {
9209 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9210 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9211 }
9212 }
9213 }
9214 /* TPR-below threshold/APIC write has the highest priority. */
9215 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9216 {
9217 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9218 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9220 }
9221 /* MTF takes priority over VMX-preemption timer. */
9222 else
9223 {
9224 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9225 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9226 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9227 }
9228 return rcStrict;
9229}
9230#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9231
9232
9233/**
9234 * The actual code execution bits of IEMExecOne, IEMExecOneEx,
9235 * IEMExecOneWithPrefetchedByPC and the other IEMExecOne* wrappers.
9236 *
9237 * Similar code is found in IEMExecLots.
9238 *
9239 * @return Strict VBox status code.
9240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9241 * @param fExecuteInhibit If set, execute the instruction following CLI,
9242 * POP SS and MOV SS,GR.
9243 * @param pszFunction The calling function name.
9244 */
9245DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9246{
9247 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9248 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9249 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9250 RT_NOREF_PV(pszFunction);
9251
9252#ifdef IEM_WITH_SETJMP
9253 VBOXSTRICTRC rcStrict;
9254 IEM_TRY_SETJMP(pVCpu, rcStrict)
9255 {
9256 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9257 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9258 }
9259 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9260 {
9261 pVCpu->iem.s.cLongJumps++;
9262 }
9263 IEM_CATCH_LONGJMP_END(pVCpu);
9264#else
9265 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9266 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9267#endif
9268 if (rcStrict == VINF_SUCCESS)
9269 pVCpu->iem.s.cInstructions++;
9270 if (pVCpu->iem.s.cActiveMappings > 0)
9271 {
9272 Assert(rcStrict != VINF_SUCCESS);
9273 iemMemRollback(pVCpu);
9274 }
9275 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9276 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9277 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9278
9279//#ifdef DEBUG
9280// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9281//#endif
9282
9283#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9284 /*
9285 * Perform any VMX nested-guest instruction boundary actions.
9286 *
9287 * If any of these causes a VM-exit, we must skip executing the next
9288 * instruction (would run into stale page tables). A VM-exit makes sure
9289 * there is no interrupt-inhibition, so that should ensure we don't try to
9290 * execute the next instruction. Clearing fExecuteInhibit is
9291 * problematic because of the setjmp/longjmp clobbering above.
9292 */
9293 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9294 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9295 || rcStrict != VINF_SUCCESS)
9296 { /* likely */ }
9297 else
9298 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9299#endif
9300
9301 /* Execute the next instruction as well if a cli, pop ss or
9302 mov ss, Gr has just completed successfully. */
9303 if ( fExecuteInhibit
9304 && rcStrict == VINF_SUCCESS
9305 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9306 {
9307 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9308 if (rcStrict == VINF_SUCCESS)
9309 {
9310#ifdef LOG_ENABLED
9311 iemLogCurInstr(pVCpu, false, pszFunction);
9312#endif
9313#ifdef IEM_WITH_SETJMP
9314 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9315 {
9316 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9317 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9318 }
9319 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9320 {
9321 pVCpu->iem.s.cLongJumps++;
9322 }
9323 IEM_CATCH_LONGJMP_END(pVCpu);
9324#else
9325 IEM_OPCODE_GET_FIRST_U8(&b);
9326 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9327#endif
9328 if (rcStrict == VINF_SUCCESS)
9329 {
9330 pVCpu->iem.s.cInstructions++;
9331#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9332 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9333 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9334 { /* likely */ }
9335 else
9336 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9337#endif
9338 }
9339 if (pVCpu->iem.s.cActiveMappings > 0)
9340 {
9341 Assert(rcStrict != VINF_SUCCESS);
9342 iemMemRollback(pVCpu);
9343 }
9344 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9345 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9346 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9347 }
9348 else if (pVCpu->iem.s.cActiveMappings > 0)
9349 iemMemRollback(pVCpu);
9350 /** @todo drop this after we bake this change into RIP advancing. */
9351 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9352 }
9353
9354 /*
9355 * Return value fiddling, statistics and sanity assertions.
9356 */
9357 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9358
9359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9361 return rcStrict;
9362}
9363
9364
9365/**
9366 * Execute one instruction.
9367 *
9368 * @return Strict VBox status code.
9369 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9370 */
9371VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9372{
9373 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9374#ifdef LOG_ENABLED
9375 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9376#endif
9377
9378 /*
9379 * Do the decoding and emulation.
9380 */
9381 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9382 if (rcStrict == VINF_SUCCESS)
9383 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9384 else if (pVCpu->iem.s.cActiveMappings > 0)
9385 iemMemRollback(pVCpu);
9386
9387 if (rcStrict != VINF_SUCCESS)
9388 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9389 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9390 return rcStrict;
9391}
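/*
 * Illustrative sketch (comment only, not built): a minimal way an outside
 * caller could single-step the guest with IEMExecOne() and stop on the first
 * non-success status.  This is not how EM actually drives IEM; it merely
 * shows the calling pattern, and the loop bound is an arbitrary example.
 *
 *      for (unsigned i = 0; i < 32; i++)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;    // informational VINF_EM_* statuses included
 *      }
 */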
9392
9393
9394VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9395{
9396 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9397 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9398 if (rcStrict == VINF_SUCCESS)
9399 {
9400 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9401 if (pcbWritten)
9402 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9403 }
9404 else if (pVCpu->iem.s.cActiveMappings > 0)
9405 iemMemRollback(pVCpu);
9406
9407 return rcStrict;
9408}
9409
9410
9411VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9412 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9413{
9414 VBOXSTRICTRC rcStrict;
9415 if ( cbOpcodeBytes
9416 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9417 {
9418 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9419#ifdef IEM_WITH_CODE_TLB
9420 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9421 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9422 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9423 pVCpu->iem.s.offCurInstrStart = 0;
9424 pVCpu->iem.s.offInstrNextByte = 0;
9425 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9426#else
9427 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9428 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9429#endif
9430 rcStrict = VINF_SUCCESS;
9431 }
9432 else
9433 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9434 if (rcStrict == VINF_SUCCESS)
9435 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9436 else if (pVCpu->iem.s.cActiveMappings > 0)
9437 iemMemRollback(pVCpu);
9438
9439 return rcStrict;
9440}
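/*
 * Illustrative sketch (comment only, not built): feeding already fetched
 * opcode bytes to IEMExecOneWithPrefetchedByPC() so IEM can skip the initial
 * opcode prefetch.  The abBytes buffer and its contents are made up for the
 * example; the bytes must correspond to the current CS:RIP for the prefetch
 * shortcut to be taken, otherwise the normal prefetch path is used.
 *
 *      uint8_t const abBytes[] = { 0x90 };  // a NOP at the current RIP
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu,
 *                                                           pVCpu->cpum.GstCtx.rip,
 *                                                           abBytes, sizeof(abBytes));
 */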
9441
9442
9443VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9444{
9445 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9446 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9447 if (rcStrict == VINF_SUCCESS)
9448 {
9449 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9450 if (pcbWritten)
9451 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9452 }
9453 else if (pVCpu->iem.s.cActiveMappings > 0)
9454 iemMemRollback(pVCpu);
9455
9456 return rcStrict;
9457}
9458
9459
9460VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9461 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9462{
9463 VBOXSTRICTRC rcStrict;
9464 if ( cbOpcodeBytes
9465 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9466 {
9467 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9468#ifdef IEM_WITH_CODE_TLB
9469 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9470 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9471 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9472 pVCpu->iem.s.offCurInstrStart = 0;
9473 pVCpu->iem.s.offInstrNextByte = 0;
9474 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9475#else
9476 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9477 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9478#endif
9479 rcStrict = VINF_SUCCESS;
9480 }
9481 else
9482 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9483 if (rcStrict == VINF_SUCCESS)
9484 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9485 else if (pVCpu->iem.s.cActiveMappings > 0)
9486 iemMemRollback(pVCpu);
9487
9488 return rcStrict;
9489}
9490
9491
9492/**
9493 * For handling split cacheline lock operations when the host has split-lock
9494 * detection enabled.
9495 *
9496 * This will cause the interpreter to disregard the lock prefix and implicit
9497 * locking (xchg).
9498 *
9499 * @returns Strict VBox status code.
9500 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9501 */
9502VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9503{
9504 /*
9505 * Do the decoding and emulation.
9506 */
9507 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9508 if (rcStrict == VINF_SUCCESS)
9509 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9510 else if (pVCpu->iem.s.cActiveMappings > 0)
9511 iemMemRollback(pVCpu);
9512
9513 if (rcStrict != VINF_SUCCESS)
9514 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9515 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9516 return rcStrict;
9517}
9518
9519
9520/**
9521 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9522 * inject a pending TRPM trap.
9523 */
9524VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9525{
9526 Assert(TRPMHasTrap(pVCpu));
9527
9528 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9529 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9530 {
9531 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9532#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9533 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9534 if (fIntrEnabled)
9535 {
9536 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9537 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9538 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9539 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9540 else
9541 {
9542 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9543 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9544 }
9545 }
9546#else
9547 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9548#endif
9549 if (fIntrEnabled)
9550 {
9551 uint8_t u8TrapNo;
9552 TRPMEVENT enmType;
9553 uint32_t uErrCode;
9554 RTGCPTR uCr2;
9555 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9556 AssertRC(rc2);
9557 Assert(enmType == TRPM_HARDWARE_INT);
9558 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9559
9560 TRPMResetTrap(pVCpu);
9561
9562#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9563 /* Injecting an event may cause a VM-exit. */
9564 if ( rcStrict != VINF_SUCCESS
9565 && rcStrict != VINF_IEM_RAISED_XCPT)
9566 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9567#else
9568 NOREF(rcStrict);
9569#endif
9570 }
9571 }
9572
9573 return VINF_SUCCESS;
9574}
9575
9576
9577VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9578{
9579 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9580 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9581 Assert(cMaxInstructions > 0);
9582
9583 /*
9584 * See if there is an interrupt pending in TRPM, inject it if we can.
9585 */
9586 /** @todo What if we are injecting an exception and not an interrupt? Is that
9587 * possible here? For now we assert it is indeed only an interrupt. */
9588 if (!TRPMHasTrap(pVCpu))
9589 { /* likely */ }
9590 else
9591 {
9592 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9593 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9594 { /*likely */ }
9595 else
9596 return rcStrict;
9597 }
9598
9599 /*
9600 * Initial decoder init w/ prefetch, then setup setjmp.
9601 */
9602 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9603 if (rcStrict == VINF_SUCCESS)
9604 {
9605#ifdef IEM_WITH_SETJMP
9606 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9607 IEM_TRY_SETJMP(pVCpu, rcStrict)
9608#endif
9609 {
9610 /*
9611 * The run loop. We limit ourselves to the instruction budget specified by the caller.
9612 */
9613 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9614 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9615 for (;;)
9616 {
9617 /*
9618 * Log the state.
9619 */
9620#ifdef LOG_ENABLED
9621 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9622#endif
9623
9624 /*
9625 * Do the decoding and emulation.
9626 */
9627 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9628 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9629#ifdef VBOX_STRICT
9630 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9631#endif
9632 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9633 {
9634 Assert(pVCpu->iem.s.cActiveMappings == 0);
9635 pVCpu->iem.s.cInstructions++;
9636
9637#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9638 /* Perform any VMX nested-guest instruction boundary actions. */
9639 uint64_t fCpu = pVCpu->fLocalForcedActions;
9640 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9641 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9642 { /* likely */ }
9643 else
9644 {
9645 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9646 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9647 fCpu = pVCpu->fLocalForcedActions;
9648 else
9649 {
9650 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9651 break;
9652 }
9653 }
9654#endif
9655 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9656 {
9657#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9658 uint64_t fCpu = pVCpu->fLocalForcedActions;
9659#endif
9660 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9661 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9662 | VMCPU_FF_TLB_FLUSH
9663 | VMCPU_FF_UNHALT );
9664
9665 if (RT_LIKELY( ( !fCpu
9666 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9667 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9668 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9669 {
9670 if (--cMaxInstructionsGccStupidity > 0)
9671 {
9672 /* Poll timers every now and then according to the caller's specs. */
9673 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9674 || !TMTimerPollBool(pVM, pVCpu))
9675 {
9676 Assert(pVCpu->iem.s.cActiveMappings == 0);
9677 iemReInitDecoder(pVCpu);
9678 continue;
9679 }
9680 }
9681 }
9682 }
9683 Assert(pVCpu->iem.s.cActiveMappings == 0);
9684 }
9685 else if (pVCpu->iem.s.cActiveMappings > 0)
9686 iemMemRollback(pVCpu);
9687 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9688 break;
9689 }
9690 }
9691#ifdef IEM_WITH_SETJMP
9692 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9693 {
9694 if (pVCpu->iem.s.cActiveMappings > 0)
9695 iemMemRollback(pVCpu);
9696# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9697 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9698# endif
9699 pVCpu->iem.s.cLongJumps++;
9700 }
9701 IEM_CATCH_LONGJMP_END(pVCpu);
9702#endif
9703
9704 /*
9705 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9706 */
9707 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9708 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9709 }
9710 else
9711 {
9712 if (pVCpu->iem.s.cActiveMappings > 0)
9713 iemMemRollback(pVCpu);
9714
9715#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9716 /*
9717 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9718 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9719 */
9720 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9721#endif
9722 }
9723
9724 /*
9725 * Maybe re-enter raw-mode and log.
9726 */
9727 if (rcStrict != VINF_SUCCESS)
9728 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9729 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9730 if (pcInstructions)
9731 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9732 return rcStrict;
9733}
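/*
 * Illustrative sketch (comment only, not built): invoking IEMExecLots() with
 * a poll rate satisfying the RT_IS_POWER_OF_TWO(cPollRate + 1) assertion
 * above (i.e. a power of two minus one).  The instruction budget of 4096 is
 * just an example value.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
 *                                          511 /*cPollRate*/, &cInstructions);
 *      LogFlow(("Executed %u instructions, rcStrict=%Rrc\n",
 *               cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */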
9734
9735
9736/**
9737 * Interface used by EMExecuteExec, does exit statistics and limits.
9738 *
9739 * @returns Strict VBox status code.
9740 * @param pVCpu The cross context virtual CPU structure.
9741 * @param fWillExit To be defined.
9742 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9743 * @param cMaxInstructions Maximum number of instructions to execute.
9744 * @param cMaxInstructionsWithoutExits
9745 * The max number of instructions without exits.
9746 * @param pStats Where to return statistics.
9747 */
9748VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9749 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9750{
9751 NOREF(fWillExit); /** @todo define flexible exit crits */
9752
9753 /*
9754 * Initialize return stats.
9755 */
9756 pStats->cInstructions = 0;
9757 pStats->cExits = 0;
9758 pStats->cMaxExitDistance = 0;
9759 pStats->cReserved = 0;
9760
9761 /*
9762 * Initial decoder init w/ prefetch, then setup setjmp.
9763 */
9764 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9765 if (rcStrict == VINF_SUCCESS)
9766 {
9767#ifdef IEM_WITH_SETJMP
9768 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9769 IEM_TRY_SETJMP(pVCpu, rcStrict)
9770#endif
9771 {
9772#ifdef IN_RING0
9773 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9774#endif
9775 uint32_t cInstructionSinceLastExit = 0;
9776
9777 /*
9778 * The run loop. We limit ourselves to the instruction budget specified by the caller.
9779 */
9780 PVM pVM = pVCpu->CTX_SUFF(pVM);
9781 for (;;)
9782 {
9783 /*
9784 * Log the state.
9785 */
9786#ifdef LOG_ENABLED
9787 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9788#endif
9789
9790 /*
9791 * Do the decoding and emulation.
9792 */
9793 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9794
9795 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9796 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9797
9798 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9799 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9800 {
9801 pStats->cExits += 1;
9802 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9803 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9804 cInstructionSinceLastExit = 0;
9805 }
9806
9807 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9808 {
9809 Assert(pVCpu->iem.s.cActiveMappings == 0);
9810 pVCpu->iem.s.cInstructions++;
9811 pStats->cInstructions++;
9812 cInstructionSinceLastExit++;
9813
9814#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9815 /* Perform any VMX nested-guest instruction boundary actions. */
9816 uint64_t fCpu = pVCpu->fLocalForcedActions;
9817 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9818 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9819 { /* likely */ }
9820 else
9821 {
9822 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9823 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9824 fCpu = pVCpu->fLocalForcedActions;
9825 else
9826 {
9827 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9828 break;
9829 }
9830 }
9831#endif
9832 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9833 {
9834#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9835 uint64_t fCpu = pVCpu->fLocalForcedActions;
9836#endif
9837 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9838 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9839 | VMCPU_FF_TLB_FLUSH
9840 | VMCPU_FF_UNHALT );
9841 if (RT_LIKELY( ( ( !fCpu
9842 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9843 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9844 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9845 || pStats->cInstructions < cMinInstructions))
9846 {
9847 if (pStats->cInstructions < cMaxInstructions)
9848 {
9849 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9850 {
9851#ifdef IN_RING0
9852 if ( !fCheckPreemptionPending
9853 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9854#endif
9855 {
9856 Assert(pVCpu->iem.s.cActiveMappings == 0);
9857 iemReInitDecoder(pVCpu);
9858 continue;
9859 }
9860#ifdef IN_RING0
9861 rcStrict = VINF_EM_RAW_INTERRUPT;
9862 break;
9863#endif
9864 }
9865 }
9866 }
9867 Assert(!(fCpu & VMCPU_FF_IEM));
9868 }
9869 Assert(pVCpu->iem.s.cActiveMappings == 0);
9870 }
9871 else if (pVCpu->iem.s.cActiveMappings > 0)
9872 iemMemRollback(pVCpu);
9873 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9874 break;
9875 }
9876 }
9877#ifdef IEM_WITH_SETJMP
9878 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9879 {
9880 if (pVCpu->iem.s.cActiveMappings > 0)
9881 iemMemRollback(pVCpu);
9882 pVCpu->iem.s.cLongJumps++;
9883 }
9884 IEM_CATCH_LONGJMP_END(pVCpu);
9885#endif
9886
9887 /*
9888 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9889 */
9890 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9891 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9892 }
9893 else
9894 {
9895 if (pVCpu->iem.s.cActiveMappings > 0)
9896 iemMemRollback(pVCpu);
9897
9898#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9899 /*
9900 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9901 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9902 */
9903 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9904#endif
9905 }
9906
9907 /*
9908 * Maybe re-enter raw-mode and log.
9909 */
9910 if (rcStrict != VINF_SUCCESS)
9911 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9912 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9913 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9914 return rcStrict;
9915}
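/*
 * Illustrative sketch (comment only, not built): calling IEMExecForExits()
 * and inspecting the returned statistics.  The limits are example values;
 * fWillExit is currently unused by the implementation above.
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/,
 *                                              16 /*cMinInstructions*/,
 *                                              4096 /*cMaxInstructions*/,
 *                                              512 /*cMaxInstructionsWithoutExits*/,
 *                                              &Stats);
 *      LogFlow(("%u instructions, %u exits, max exit distance %u\n",
 *               Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
 */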
9916
9917
9918/**
9919 * Injects a trap, fault, abort, software interrupt or external interrupt.
9920 *
9921 * The parameter list matches TRPMQueryTrapAll pretty closely.
9922 *
9923 * @returns Strict VBox status code.
9924 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9925 * @param u8TrapNo The trap number.
9926 * @param enmType What type is it (trap/fault/abort), software
9927 * interrupt or hardware interrupt.
9928 * @param uErrCode The error code if applicable.
9929 * @param uCr2 The CR2 value if applicable.
9930 * @param cbInstr The instruction length (only relevant for
9931 * software interrupts).
9932 */
9933VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9934 uint8_t cbInstr)
9935{
9936 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9937#ifdef DBGFTRACE_ENABLED
9938 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9939 u8TrapNo, enmType, uErrCode, uCr2);
9940#endif
9941
9942 uint32_t fFlags;
9943 switch (enmType)
9944 {
9945 case TRPM_HARDWARE_INT:
9946 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9947 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9948 uErrCode = uCr2 = 0;
9949 break;
9950
9951 case TRPM_SOFTWARE_INT:
9952 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9953 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9954 uErrCode = uCr2 = 0;
9955 break;
9956
9957 case TRPM_TRAP:
9958 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9959 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9960 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9961 if (u8TrapNo == X86_XCPT_PF)
9962 fFlags |= IEM_XCPT_FLAGS_CR2;
9963 switch (u8TrapNo)
9964 {
9965 case X86_XCPT_DF:
9966 case X86_XCPT_TS:
9967 case X86_XCPT_NP:
9968 case X86_XCPT_SS:
9969 case X86_XCPT_PF:
9970 case X86_XCPT_AC:
9971 case X86_XCPT_GP:
9972 fFlags |= IEM_XCPT_FLAGS_ERR;
9973 break;
9974 }
9975 break;
9976
9977 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9978 }
9979
9980 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9981
9982 if (pVCpu->iem.s.cActiveMappings > 0)
9983 iemMemRollback(pVCpu);
9984
9985 return rcStrict;
9986}
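/*
 * Illustrative sketch (comment only, not built): injecting external hardware
 * interrupt vector 0x20 via IEMInjectTrap().  For TRPM_HARDWARE_INT the error
 * code, CR2 and instruction length are ignored (see the switch above), so
 * zeros are passed; the vector number is an arbitrary example.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT,
 *                                            0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
 */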
9987
9988
9989/**
9990 * Injects the active TRPM event.
9991 *
9992 * @returns Strict VBox status code.
9993 * @param pVCpu The cross context virtual CPU structure.
9994 */
9995VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9996{
9997#ifndef IEM_IMPLEMENTS_TASKSWITCH
9998 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9999#else
10000 uint8_t u8TrapNo;
10001 TRPMEVENT enmType;
10002 uint32_t uErrCode;
10003 RTGCUINTPTR uCr2;
10004 uint8_t cbInstr;
10005 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10006 if (RT_FAILURE(rc))
10007 return rc;
10008
10009 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10010 * ICEBP \#DB injection as a special case. */
10011 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10012#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10013 if (rcStrict == VINF_SVM_VMEXIT)
10014 rcStrict = VINF_SUCCESS;
10015#endif
10016#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10017 if (rcStrict == VINF_VMX_VMEXIT)
10018 rcStrict = VINF_SUCCESS;
10019#endif
10020 /** @todo Are there any other codes that imply the event was successfully
10021 * delivered to the guest? See @bugref{6607}. */
10022 if ( rcStrict == VINF_SUCCESS
10023 || rcStrict == VINF_IEM_RAISED_XCPT)
10024 TRPMResetTrap(pVCpu);
10025
10026 return rcStrict;
10027#endif
10028}
10029
10030
10031VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10032{
10033 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10034 return VERR_NOT_IMPLEMENTED;
10035}
10036
10037
10038VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10039{
10040 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10041 return VERR_NOT_IMPLEMENTED;
10042}
10043
10044
10045/**
10046 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10047 *
10048 * This API ASSUMES that the caller has already verified that the guest code is
10049 * allowed to access the I/O port. (The I/O port is in the DX register in the
10050 * guest state.)
10051 *
10052 * @returns Strict VBox status code.
10053 * @param pVCpu The cross context virtual CPU structure.
10054 * @param cbValue The size of the I/O port access (1, 2, or 4).
10055 * @param enmAddrMode The addressing mode.
10056 * @param fRepPrefix Indicates whether a repeat prefix is used
10057 * (doesn't matter which for this instruction).
10058 * @param cbInstr The instruction length in bytes.
10059 * @param iEffSeg The effective segment register number.
10060 * @param fIoChecked Whether the access to the I/O port has been
10061 * checked or not. It's typically checked in the
10062 * HM scenario.
10063 */
10064VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10065 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10066{
10067 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10068 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10069
10070 /*
10071 * State init.
10072 */
10073 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10074
10075 /*
10076 * Switch orgy for getting to the right handler.
10077 */
10078 VBOXSTRICTRC rcStrict;
10079 if (fRepPrefix)
10080 {
10081 switch (enmAddrMode)
10082 {
10083 case IEMMODE_16BIT:
10084 switch (cbValue)
10085 {
10086 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10087 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10088 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10089 default:
10090 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10091 }
10092 break;
10093
10094 case IEMMODE_32BIT:
10095 switch (cbValue)
10096 {
10097 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10098 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10099 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10100 default:
10101 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10102 }
10103 break;
10104
10105 case IEMMODE_64BIT:
10106 switch (cbValue)
10107 {
10108 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10109 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10110 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10111 default:
10112 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10113 }
10114 break;
10115
10116 default:
10117 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10118 }
10119 }
10120 else
10121 {
10122 switch (enmAddrMode)
10123 {
10124 case IEMMODE_16BIT:
10125 switch (cbValue)
10126 {
10127 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10128 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10129 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10130 default:
10131 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10132 }
10133 break;
10134
10135 case IEMMODE_32BIT:
10136 switch (cbValue)
10137 {
10138 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10139 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10140 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10141 default:
10142 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10143 }
10144 break;
10145
10146 case IEMMODE_64BIT:
10147 switch (cbValue)
10148 {
10149 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10150 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10151 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10152 default:
10153 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10154 }
10155 break;
10156
10157 default:
10158 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10159 }
10160 }
10161
10162 if (pVCpu->iem.s.cActiveMappings)
10163 iemMemRollback(pVCpu);
10164
10165 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10166}
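/*
 * Illustrative sketch (comment only, not built): how HM/EM might ask IEM to
 * complete a "rep outsb" that exited on I/O, assuming a 32-bit address size,
 * DS as the effective segment and an already verified I/O port.  Here
 * cbInstr stands for the decoded instruction length supplied by the caller.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/,
 *                                                   IEMMODE_32BIT,
 *                                                   true /*fRepPrefix*/,
 *                                                   cbInstr, X86_SREG_DS,
 *                                                   true /*fIoChecked*/);
 */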
10167
10168
10169/**
10170 * Interface for HM and EM for executing string I/O IN (read) instructions.
10171 *
10172 * This API ASSUMES that the caller has already verified that the guest code is
10173 * allowed to access the I/O port. (The I/O port is in the DX register in the
10174 * guest state.)
10175 *
10176 * @returns Strict VBox status code.
10177 * @param pVCpu The cross context virtual CPU structure.
10178 * @param cbValue The size of the I/O port access (1, 2, or 4).
10179 * @param enmAddrMode The addressing mode.
10180 * @param fRepPrefix Indicates whether a repeat prefix is used
10181 * (doesn't matter which for this instruction).
10182 * @param cbInstr The instruction length in bytes.
10183 * @param fIoChecked Whether the access to the I/O port has been
10184 * checked or not. It's typically checked in the
10185 * HM scenario.
10186 */
10187VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10188 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10189{
10190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10191
10192 /*
10193 * State init.
10194 */
10195 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10196
10197 /*
10198 * Switch orgy for getting to the right handler.
10199 */
10200 VBOXSTRICTRC rcStrict;
10201 if (fRepPrefix)
10202 {
10203 switch (enmAddrMode)
10204 {
10205 case IEMMODE_16BIT:
10206 switch (cbValue)
10207 {
10208 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10209 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10210 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10211 default:
10212 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10213 }
10214 break;
10215
10216 case IEMMODE_32BIT:
10217 switch (cbValue)
10218 {
10219 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10220 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10221 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10222 default:
10223 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10224 }
10225 break;
10226
10227 case IEMMODE_64BIT:
10228 switch (cbValue)
10229 {
10230 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10231 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10232 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10233 default:
10234 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10235 }
10236 break;
10237
10238 default:
10239 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10240 }
10241 }
10242 else
10243 {
10244 switch (enmAddrMode)
10245 {
10246 case IEMMODE_16BIT:
10247 switch (cbValue)
10248 {
10249 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10250 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10251 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10252 default:
10253 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10254 }
10255 break;
10256
10257 case IEMMODE_32BIT:
10258 switch (cbValue)
10259 {
10260 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10261 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10262 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10263 default:
10264 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10265 }
10266 break;
10267
10268 case IEMMODE_64BIT:
10269 switch (cbValue)
10270 {
10271 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10272 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10273 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10274 default:
10275 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10276 }
10277 break;
10278
10279 default:
10280 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10281 }
10282 }
10283
10284 if ( pVCpu->iem.s.cActiveMappings == 0
10285 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10286 { /* likely */ }
10287 else
10288 {
10289 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10290 iemMemRollback(pVCpu);
10291 }
10292 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10293}
10294
10295
10296/**
10297 * Interface for rawmode to execute an OUT (write) instruction.
10298 *
10299 * @returns Strict VBox status code.
10300 * @param pVCpu The cross context virtual CPU structure.
10301 * @param cbInstr The instruction length in bytes.
10302 * @param u16Port The port to write to.
10303 * @param fImm Whether the port is specified using an immediate operand or
10304 * using the implicit DX register.
10305 * @param cbReg The register size.
10306 *
10307 * @remarks In ring-0 not all of the state needs to be synced in.
10308 */
10309VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10310{
10311 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10312 Assert(cbReg <= 4 && cbReg != 3);
10313
10314 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10315 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10316 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10317 Assert(!pVCpu->iem.s.cActiveMappings);
10318 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10319}
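/*
 * Illustrative sketch (comment only, not built): letting IEM emulate an
 * already decoded "out dx, al" (1 byte register size, port taken from DX, no
 * immediate).  The 1-byte instruction length matches the single-byte 0xEE
 * encoding; the guest DX value is assumed to be synced into the context.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/,
 *                                                pVCpu->cpum.GstCtx.dx,
 *                                                false /*fImm*/, 1 /*cbReg*/);
 */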
10320
10321
10322/**
10323 * Interface for rawmode to execute an IN (read) instruction.
10324 *
10325 * @returns Strict VBox status code.
10326 * @param pVCpu The cross context virtual CPU structure.
10327 * @param cbInstr The instruction length in bytes.
10328 * @param u16Port The port to read.
10329 * @param fImm Whether the port is specified using an immediate operand or
10330 * using the implicit DX register.
10331 * @param cbReg The register size.
10332 */
10333VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10334{
10335 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10336 Assert(cbReg <= 4 && cbReg != 3);
10337
10338 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10339 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10340 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10341 Assert(!pVCpu->iem.s.cActiveMappings);
10342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10343}
10344
10345
10346/**
10347 * Interface for HM and EM to write to a CRx register.
10348 *
10349 * @returns Strict VBox status code.
10350 * @param pVCpu The cross context virtual CPU structure.
10351 * @param cbInstr The instruction length in bytes.
10352 * @param iCrReg The control register number (destination).
10353 * @param iGReg The general purpose register number (source).
10354 *
10355 * @remarks In ring-0 not all of the state needs to be synced in.
10356 */
10357VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10358{
10359 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10360 Assert(iCrReg < 16);
10361 Assert(iGReg < 16);
10362
10363 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10365 Assert(!pVCpu->iem.s.cActiveMappings);
10366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10367}
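/*
 * Illustrative sketch (comment only, not built): emulating a decoded
 * "mov cr3, rax" (destination CR3, source general purpose register 0) with a
 * 3 byte instruction length corresponding to the 0F 22 D8 encoding.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/,
 *                                                        3 /*iCrReg=CR3*/,
 *                                                        0 /*iGReg=RAX*/);
 */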
10368
10369
10370/**
10371 * Interface for HM and EM to read from a CRx register.
10372 *
10373 * @returns Strict VBox status code.
10374 * @param pVCpu The cross context virtual CPU structure.
10375 * @param cbInstr The instruction length in bytes.
10376 * @param iGReg The general purpose register number (destination).
10377 * @param iCrReg The control register number (source).
10378 *
10379 * @remarks In ring-0 not all of the state needs to be synced in.
10380 */
10381VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10382{
10383 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10384 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10385 | CPUMCTX_EXTRN_APIC_TPR);
10386 Assert(iCrReg < 16);
10387 Assert(iGReg < 16);
10388
10389 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10390 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10391 Assert(!pVCpu->iem.s.cActiveMappings);
10392 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10393}
10394
10395
10396/**
10397 * Interface for HM and EM to write to a DRx register.
10398 *
10399 * @returns Strict VBox status code.
10400 * @param pVCpu The cross context virtual CPU structure.
10401 * @param cbInstr The instruction length in bytes.
10402 * @param iDrReg The debug register number (destination).
10403 * @param iGReg The general purpose register number (source).
10404 *
10405 * @remarks In ring-0 not all of the state needs to be synced in.
10406 */
10407VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10408{
10409 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10410 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10411 Assert(iDrReg < 8);
10412 Assert(iGReg < 16);
10413
10414 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10415 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10416 Assert(!pVCpu->iem.s.cActiveMappings);
10417 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10418}
10419
10420
10421/**
10422 * Interface for HM and EM to read from a DRx register.
10423 *
10424 * @returns Strict VBox status code.
10425 * @param pVCpu The cross context virtual CPU structure.
10426 * @param cbInstr The instruction length in bytes.
10427 * @param iGReg The general purpose register number (destination).
10428 * @param iDrReg The debug register number (source).
10429 *
10430 * @remarks In ring-0 not all of the state needs to be synced in.
10431 */
10432VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10433{
10434 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10435 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10436 Assert(iDrReg < 8);
10437 Assert(iGReg < 16);
10438
10439 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10440 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10441 Assert(!pVCpu->iem.s.cActiveMappings);
10442 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10443}
10444
10445
10446/**
10447 * Interface for HM and EM to clear the CR0[TS] bit.
10448 *
10449 * @returns Strict VBox status code.
10450 * @param pVCpu The cross context virtual CPU structure.
10451 * @param cbInstr The instruction length in bytes.
10452 *
10453 * @remarks In ring-0 not all of the state needs to be synced in.
10454 */
10455VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10456{
10457 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10458
10459 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10460 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10461 Assert(!pVCpu->iem.s.cActiveMappings);
10462 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10463}
10464
10465
10466/**
10467 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure.
10471 * @param cbInstr The instruction length in bytes.
10472 * @param uValue The value to load into CR0.
10473 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10474 * memory operand. Otherwise pass NIL_RTGCPTR.
10475 *
10476 * @remarks In ring-0 not all of the state needs to be synced in.
10477 */
10478VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10479{
10480 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10481
10482 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10483 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10484 Assert(!pVCpu->iem.s.cActiveMappings);
10485 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10486}
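/*
 * Illustrative sketch (not from the VirtualBox sources) of the register form: "lmsw ax"
 * encodes as 0F 01 F0 (3 bytes) and has no memory operand, so NIL_RTGCPTR is passed.
 * uNewMsw is a placeholder for the 16-bit source value the caller already fetched:
 *
 * @code
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, 3, uNewMsw, NIL_RTGCPTR);
 * @endcode
 */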
10487
10488
10489/**
10490 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10491 *
10492 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10493 *
10494 * @returns Strict VBox status code.
10495 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10496 * @param cbInstr The instruction length in bytes.
10497 * @remarks In ring-0 not all of the state needs to be synced in.
10498 * @thread EMT(pVCpu)
10499 */
10500VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10501{
10502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10503
10504 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10505 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10506 Assert(!pVCpu->iem.s.cActiveMappings);
10507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10508}
10509
10510
10511/**
10512 * Interface for HM and EM to emulate the WBINVD instruction.
10513 *
10514 * @returns Strict VBox status code.
10515 * @param pVCpu The cross context virtual CPU structure.
10516 * @param cbInstr The instruction length in bytes.
10517 *
10518 * @remarks In ring-0 not all of the state needs to be synced in.
10519 */
10520VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10521{
10522 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10523
10524 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10525 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10526 Assert(!pVCpu->iem.s.cActiveMappings);
10527 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10528}
10529
10530
10531/**
10532 * Interface for HM and EM to emulate the INVD instruction.
10533 *
10534 * @returns Strict VBox status code.
10535 * @param pVCpu The cross context virtual CPU structure.
10536 * @param cbInstr The instruction length in bytes.
10537 *
10538 * @remarks In ring-0 not all of the state needs to be synced in.
10539 */
10540VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10541{
10542 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10543
10544 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10545 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10546 Assert(!pVCpu->iem.s.cActiveMappings);
10547 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10548}
10549
10550
10551/**
10552 * Interface for HM and EM to emulate the INVLPG instruction.
10553 *
10554 * @returns Strict VBox status code.
10555 * @retval VINF_PGM_SYNC_CR3
10556 *
10557 * @param pVCpu The cross context virtual CPU structure.
10558 * @param cbInstr The instruction length in bytes.
10559 * @param GCPtrPage The effective address of the page to invalidate.
10560 *
10561 * @remarks In ring-0 not all of the state needs to be synced in.
10562 */
10563VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10564{
10565 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10566
10567 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10568 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10569 Assert(!pVCpu->iem.s.cActiveMappings);
10570 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10571}
10572
10573
10574/**
10575 * Interface for HM and EM to emulate the INVPCID instruction.
10576 *
10577 * @returns Strict VBox status code.
10578 * @retval VINF_PGM_SYNC_CR3
10579 *
10580 * @param pVCpu The cross context virtual CPU structure.
10581 * @param cbInstr The instruction length in bytes.
10582 * @param iEffSeg The effective segment register.
10583 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10584 * @param uType The invalidation type.
10585 *
10586 * @remarks In ring-0 not all of the state needs to be synced in.
10587 */
10588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10589 uint64_t uType)
10590{
10591 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10592
10593 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10594 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10595 Assert(!pVCpu->iem.s.cActiveMappings);
10596 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10597}
10598
10599
10600/**
10601 * Interface for HM and EM to emulate the CPUID instruction.
10602 *
10603 * @returns Strict VBox status code.
10604 *
10605 * @param pVCpu The cross context virtual CPU structure.
10606 * @param cbInstr The instruction length in bytes.
10607 *
10608 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10609 */
10610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10611{
10612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10613 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10614
10615 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10616 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10617 Assert(!pVCpu->iem.s.cActiveMappings);
10618 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10619}
10620
10621
10622/**
10623 * Interface for HM and EM to emulate the RDPMC instruction.
10624 *
10625 * @returns Strict VBox status code.
10626 *
10627 * @param pVCpu The cross context virtual CPU structure.
10628 * @param cbInstr The instruction length in bytes.
10629 *
10630 * @remarks Not all of the state needs to be synced in.
10631 */
10632VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10633{
10634 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10635 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10636
10637 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10638 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10639 Assert(!pVCpu->iem.s.cActiveMappings);
10640 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10641}
10642
10643
10644/**
10645 * Interface for HM and EM to emulate the RDTSC instruction.
10646 *
10647 * @returns Strict VBox status code.
10648 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10649 *
10650 * @param pVCpu The cross context virtual CPU structure.
10651 * @param cbInstr The instruction length in bytes.
10652 *
10653 * @remarks Not all of the state needs to be synced in.
10654 */
10655VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10656{
10657 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10658 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10659
10660 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10661 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10662 Assert(!pVCpu->iem.s.cActiveMappings);
10663 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10664}
10665
10666
10667/**
10668 * Interface for HM and EM to emulate the RDTSCP instruction.
10669 *
10670 * @returns Strict VBox status code.
10671 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10672 *
10673 * @param pVCpu The cross context virtual CPU structure.
10674 * @param cbInstr The instruction length in bytes.
10675 *
10676 * @remarks Not all of the state needs to be synced in. Recommended
10677 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10678 */
10679VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10680{
10681 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10682 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10683
10684 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10685 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10686 Assert(!pVCpu->iem.s.cActiveMappings);
10687 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10688}
10689
10690
10691/**
10692 * Interface for HM and EM to emulate the RDMSR instruction.
10693 *
10694 * @returns Strict VBox status code.
10695 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10696 *
10697 * @param pVCpu The cross context virtual CPU structure.
10698 * @param cbInstr The instruction length in bytes.
10699 *
10700 * @remarks Not all of the state needs to be synced in. Requires RCX and
10701 * (currently) all MSRs.
10702 */
10703VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10704{
10705 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10706 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10707
10708 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10710 Assert(!pVCpu->iem.s.cActiveMappings);
10711 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10712}
10713
10714
10715/**
10716 * Interface for HM and EM to emulate the WRMSR instruction.
10717 *
10718 * @returns Strict VBox status code.
10719 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10720 *
10721 * @param pVCpu The cross context virtual CPU structure.
10722 * @param cbInstr The instruction length in bytes.
10723 *
10724 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10725 * and (currently) all MSRs.
10726 */
10727VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10728{
10729 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10730 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10731 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10732
10733 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10734 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10735 Assert(!pVCpu->iem.s.cActiveMappings);
10736 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10737}
10738
10739
10740/**
10741 * Interface for HM and EM to emulate the MONITOR instruction.
10742 *
10743 * @returns Strict VBox status code.
10744 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10745 *
10746 * @param pVCpu The cross context virtual CPU structure.
10747 * @param cbInstr The instruction length in bytes.
10748 *
10749 * @remarks Not all of the state needs to be synced in.
10750 * @remarks ASSUMES the default DS segment and that no segment override prefixes
10751 * are used.
10752 */
10753VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10754{
10755 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10756 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10757
10758 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10759 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10760 Assert(!pVCpu->iem.s.cActiveMappings);
10761 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10762}
10763
10764
10765/**
10766 * Interface for HM and EM to emulate the MWAIT instruction.
10767 *
10768 * @returns Strict VBox status code.
10769 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10770 *
10771 * @param pVCpu The cross context virtual CPU structure.
10772 * @param cbInstr The instruction length in bytes.
10773 *
10774 * @remarks Not all of the state needs to be synced in.
10775 */
10776VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10777{
10778 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10779 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10780
10781 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10782 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10783 Assert(!pVCpu->iem.s.cActiveMappings);
10784 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10785}
10786
10787
10788/**
10789 * Interface for HM and EM to emulate the HLT instruction.
10790 *
10791 * @returns Strict VBox status code.
10792 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10793 *
10794 * @param pVCpu The cross context virtual CPU structure.
10795 * @param cbInstr The instruction length in bytes.
10796 *
10797 * @remarks Not all of the state needs to be synced in.
10798 */
10799VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10800{
10801 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10802
10803 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10804 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10805 Assert(!pVCpu->iem.s.cActiveMappings);
10806 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10807}
10808
10809
10810/**
10811 * Checks if IEM is in the process of delivering an event (interrupt or
10812 * exception).
10813 *
10814 * @returns true if we're in the process of raising an interrupt or exception,
10815 * false otherwise.
10816 * @param pVCpu The cross context virtual CPU structure.
10817 * @param puVector Where to store the vector associated with the
10818 * currently delivered event, optional.
10819 * @param pfFlags Where to store the event delivery flags (see
10820 * IEM_XCPT_FLAGS_XXX), optional.
10821 * @param puErr Where to store the error code associated with the
10822 * event, optional.
10823 * @param puCr2 Where to store the CR2 associated with the event,
10824 * optional.
10825 * @remarks The caller should check the flags to determine if the error code and
10826 * CR2 are valid for the event.
10827 */
10828VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10829{
10830 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10831 if (fRaisingXcpt)
10832 {
10833 if (puVector)
10834 *puVector = pVCpu->iem.s.uCurXcpt;
10835 if (pfFlags)
10836 *pfFlags = pVCpu->iem.s.fCurXcpt;
10837 if (puErr)
10838 *puErr = pVCpu->iem.s.uCurXcptErr;
10839 if (puCr2)
10840 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10841 }
10842 return fRaisingXcpt;
10843}
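/*
 * Illustrative sketch (not from the VirtualBox sources) of consuming the optional outputs,
 * checking the flags before trusting the error code and CR2.  IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 are assumed here to be among the IEM_XCPT_FLAGS_XXX bits:
 *
 * @code
 *    uint8_t  uVector = 0;
 *    uint32_t fFlags  = 0;
 *    uint32_t uErr    = 0;
 *    uint64_t uCr2    = 0;
 *    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *    {
 *        Log(("IEM is delivering vector %#x\n", uVector));
 *        if (fFlags & IEM_XCPT_FLAGS_ERR)
 *            Log(("  error code %#x\n", uErr));
 *        if (fFlags & IEM_XCPT_FLAGS_CR2)
 *            Log(("  CR2 %RX64\n", uCr2));
 *    }
 * @endcode
 */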
10844
10845#ifdef IN_RING3
10846
10847/**
10848 * Handles the unlikely and probably fatal merge cases.
10849 *
10850 * @returns Merged status code.
10851 * @param rcStrict Current EM status code.
10852 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10853 * with @a rcStrict.
10854 * @param iMemMap The memory mapping index. For error reporting only.
10855 * @param pVCpu The cross context virtual CPU structure of the calling
10856 * thread, for error reporting only.
10857 */
10858DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10859 unsigned iMemMap, PVMCPUCC pVCpu)
10860{
10861 if (RT_FAILURE_NP(rcStrict))
10862 return rcStrict;
10863
10864 if (RT_FAILURE_NP(rcStrictCommit))
10865 return rcStrictCommit;
10866
10867 if (rcStrict == rcStrictCommit)
10868 return rcStrictCommit;
10869
10870 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10871 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10872 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10875 return VERR_IOM_FF_STATUS_IPE;
10876}
10877
10878
10879/**
10880 * Helper for IOMR3ProcessForceFlag.
10881 *
10882 * @returns Merged status code.
10883 * @param rcStrict Current EM status code.
10884 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10885 * with @a rcStrict.
10886 * @param iMemMap The memory mapping index. For error reporting only.
10887 * @param pVCpu The cross context virtual CPU structure of the calling
10888 * thread, for error reporting only.
10889 */
10890DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10891{
10892 /* Simple. */
10893 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10894 return rcStrictCommit;
10895
10896 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10897 return rcStrict;
10898
10899 /* EM scheduling status codes. */
10900 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10901 && rcStrict <= VINF_EM_LAST))
10902 {
10903 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10904 && rcStrictCommit <= VINF_EM_LAST))
10905 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10906 }
10907
10908 /* Unlikely */
10909 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10910}
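/*
 * Worked example (illustrative only): among the EM scheduling codes the numerically lower
 * value is the more urgent request, so the range check above returns whichever of the two
 * codes is lower.  Assuming, per that convention, that VINF_EM_OFF ranks below
 * VINF_EM_RESCHEDULE:
 *
 * @code
 *    // rcStrict asked to reschedule, the commit asked to power the VM off;
 *    // the merged status is the more urgent power-off request.
 *    Assert(iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_EM_OFF, 0, pVCpu) == VINF_EM_OFF);
 * @endcode
 */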
10911
10912
10913/**
10914 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10915 *
10916 * @returns Merge between @a rcStrict and what the commit operation returned.
10917 * @param pVM The cross context VM structure.
10918 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10919 * @param rcStrict The status code returned by ring-0 or raw-mode.
10920 */
10921VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10922{
10923 /*
10924 * Reset the pending commit.
10925 */
10926 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10927 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10928 ("%#x %#x %#x\n",
10929 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10930 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10931
10932 /*
10933 * Commit the pending bounce buffers (usually just one).
10934 */
10935 unsigned cBufs = 0;
10936 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10937 while (iMemMap-- > 0)
10938 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10939 {
10940 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10941 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10942 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10943
10944 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10945 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10946 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10947
10948 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10949 {
10950 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10951 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10952 pbBuf,
10953 cbFirst,
10954 PGMACCESSORIGIN_IEM);
10955 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10956 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10957 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10958 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10959 }
10960
10961 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10962 {
10963 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10964 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10965 pbBuf + cbFirst,
10966 cbSecond,
10967 PGMACCESSORIGIN_IEM);
10968 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10969 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10970 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10971 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10972 }
10973 cBufs++;
10974 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10975 }
10976
10977 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10978 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10979 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10980 pVCpu->iem.s.cActiveMappings = 0;
10981 return rcStrict;
10982}
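/*
 * Illustrative sketch (not a verbatim quote of the EM code) of how the ring-3 force-flag
 * processing is expected to reach this function once VMCPU_FF_IEM is set:
 *
 * @code
 *    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */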
10983
10984#endif /* IN_RING3 */
10985