VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 103417

Last change on this file since 103417 was 103417, checked in by vboxsync, 9 months ago

Devices/Graphics,VMM: Fix some unused function warnings, bugref:3409

1/* $Id: IEMAll.cpp 103417 2024-02-19 08:44:55Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
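/* Illustrative example of the assignments above: a TLB flush in the "IEM"
 * group is logged at level 10, e.g.
 *          Log10(("IEMTlbInvalidateAll\n"));
 * (an actual statement further down in this file), while e.g. a stack write
 * would be logged at level 11 under the "IEM_MEM" group by the memory code. */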
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
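/* Usage sketch (assumption: iemCalcExecDbgFlags, mentioned in the @note above,
 * is an inline wrapper that only falls back to this slow path when guest or
 * hypervisor breakpoints are actually armed):
 *          fExec |= iemCalcExecDbgFlags(pVCpu);
 * The IEM_F_PENDING_BRK_XXX bits returned here only flag that breakpoint
 * checks are needed; matching the individual DR0..DR3 addresses is presumably
 * done later by the execution code. */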
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
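/* Descriptive note: the main difference from iemInitDecoder above is the code
 * TLB handling - when the current RIP still falls inside the previously mapped
 * instruction buffer (pbInstrBuf/uInstrBufPc), the readable window is merely
 * re-clamped instead of being thrown away, so the next instruction on the same
 * page does not force a new fetch. */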
455
456
457
458/**
459 * Prefetches opcodes the first time execution is started.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
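/* Note (descriptive): when IEM_WITH_CODE_TLB is defined the prefetch body
 * above is compiled out and this function reduces to iemInitDecoder(); the
 * actual opcode fetching is then deferred to iemOpcodeFetchBytesJmp below,
 * which goes through the code TLB. */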
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
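/* Note (descriptive): TLB entry tags are stored as the page tag OR'ed with the
 * current uTlbRevision (see IEMTlbInvalidatePage below), so bumping the
 * revision invalidates every entry at once - the stored tags simply no longer
 * match. Only when the revision counter wraps around do we pay for walking
 * aEntries and zeroing the tags, as done in the unlikely branches above. */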
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs in a slow fashion following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
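/* Note (descriptive): the physical revision works like the virtual one above
 * but lives in the IEMTLBE_F_PHYS_REV bits of each entry's fFlagsAndPhysRev.
 * Bumping uTlbPhysRev makes the comparison in the opcode fetch / memory map
 * code fail, forcing a fresh PGMPhysIemGCPhys2PtrNoLock lookup for the page;
 * the slow worker above handles the rollover case for both TLBs. */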
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
831 * failure and jumping.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit is actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * If there is no special read handling, we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
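/* Note (descriptive): the cbInstr + cbDst <= 15 checks above enforce the
 * architectural 15-byte maximum instruction length; once an instruction would
 * exceed it, the fetcher raises #GP(0) instead of reading further. */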
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode word.
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
1377 return rcStrict;
1378}
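/* Note (descriptive): the (int8_t) casts in these S8Sx* helpers rely on the
 * standard C integer conversions - the byte is sign-extended to int and the
 * result is then converted to the unsigned destination type, preserving the
 * sign-extended bit pattern in the destination width (U16/U32/U64). */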
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
1640
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
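
/* The classification above follows the Intel SDM "Conditions for Generating
   a Double Fault" table: #DE/#TS/#NP/#SS/#GP are contributory (plus #SX on
   AMD), #PF (and #VE on Intel) forms the page-fault class, #DF is special
   cased, and everything else (e.g. #DB, #NMI, #AC) is treated as benign. */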
1748
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768 * Only CPU exceptions can be raised while delivering other events, software interrupt
1769 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
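
/* A few concrete outcomes of the logic above: a #GP raised while delivering
   a #DE (both contributory) yields IEMXCPTRAISE_DOUBLE_FAULT; a #PF while
   delivering a #PF yields IEMXCPTRAISE_DOUBLE_FAULT with
   IEMXCPTRAISEINFO_PF_PF; a #PF while delivering a #NP (contributory
   followed by page fault) is handled serially as IEMXCPTRAISE_CURRENT_XCPT;
   and back-to-back #ACs are flagged as IEMXCPTRAISE_CPU_HANG. */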
1843
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
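
/* Summary of the checks above: a null selector, an RPL != CPL, a system or
   non-writable segment, and a DPL != CPL all raise #TS for the new SS, while
   a clear present bit raises #NP instead. */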
1934
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
1967 uint32_t off = uCpl * 4 + 2;
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
1994 uint32_t off = uCpl * 8 + 4;
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997 /** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
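
/* Worked example: for a 32-bit TSS and uCpl=1 the qword is read at
   tr.u64Base + 1*8 + 4, i.e. the {esp1, ss1} pair; the low dword becomes
   *puEsp and the low word of the upper dword becomes *pSelSS.  For a
   16-bit TSS and uCpl=2 the dword at tr.u64Base + 2*4 + 2 covers the
   {sp2, ss2} word pair. */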
2025
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
2045 uint32_t off;
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
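
/* Worked example: with uIst=0 and uCpl=2 the qword read is at
   tr.u64Base + RT_UOFFSETOF(X86TSS64, rsp0) + 2*8, i.e. the RSP2 field;
   with uIst=3 it is the IST3 field regardless of uCpl. */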
2058
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118#ifdef LOG_ENABLED
2119 /* If it's a software interrupt, try to decode it if logging is enabled and such. */
2120 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2121 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2122 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2123#endif
2124
2125 /*
2126 * Push the stack frame.
2127 */
2128 uint8_t bUnmapInfo;
2129 uint16_t *pu16Frame;
2130 uint64_t uNewRsp;
2131 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2132 if (rcStrict != VINF_SUCCESS)
2133 return rcStrict;
2134
2135 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2136#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2137 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2138 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2139 fEfl |= UINT16_C(0xf000);
2140#endif
2141 pu16Frame[2] = (uint16_t)fEfl;
2142 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2143 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2144 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2145 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2146 return rcStrict;
2147
2148 /*
2149 * Load the vector address into cs:ip and make exception specific state
2150 * adjustments.
2151 */
2152 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2153 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2155 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2156 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2157 pVCpu->cpum.GstCtx.rip = Idte.off;
2158 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2159 IEMMISC_SET_EFL(pVCpu, fEfl);
2160
2161 /** @todo do we actually do this in real mode? */
2162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2164
2165 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2166 so best leave them alone in case we're in a weird kind of real mode... */
2167
2168 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2169}
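
/* Worked example, assuming the usual real-mode IDTR base of zero: for
   INT 21h the IVT entry is read as a dword at linear address 4*0x21 = 0x84,
   the low word being the handler offset (new IP) and the high word the
   handler segment (new CS).  The 6-byte frame built above holds, from the
   new SS:SP upwards, the return IP, then CS, then FLAGS, and IF/TF/AC are
   cleared before execution resumes at the handler. */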
2170
2171
2172/**
2173 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 */
2178DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2179{
2180 pSReg->Sel = 0;
2181 pSReg->ValidSel = 0;
2182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2183 {
2184 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
2185 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2186 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2187 }
2188 else
2189 {
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 /** @todo check this on AMD-V */
2192 pSReg->u64Base = 0;
2193 pSReg->u32Limit = 0;
2194 }
2195}
2196
2197
2198/**
2199 * Loads a segment selector during a task switch in V8086 mode.
2200 *
2201 * @param pSReg Pointer to the segment register.
2202 * @param uSel The selector value to load.
2203 */
2204DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2205{
2206 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2207 pSReg->Sel = uSel;
2208 pSReg->ValidSel = uSel;
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 pSReg->u64Base = uSel << 4;
2211 pSReg->u32Limit = 0xffff;
2212 pSReg->Attr.u = 0xf3;
2213}
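
/* Attr 0xf3 decodes to a present, DPL=3, read/write accessed data segment,
   which together with base = selector << 4 and a 0xffff limit is the fixed
   shape of a segment register in virtual-8086 mode; e.g. uSel=0x1234 gives
   a base of 0x12340. */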
2214
2215
2216/**
2217 * Loads a segment selector during a task switch in protected mode.
2218 *
2219 * In this task switch scenario, we would throw \#TS exceptions rather than
2220 * \#GPs.
2221 *
2222 * @returns VBox strict status code.
2223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2224 * @param pSReg Pointer to the segment register.
2225 * @param uSel The new selector value.
2226 *
2227 * @remarks This does _not_ handle CS or SS.
2228 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2229 */
2230static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2233
2234 /* Null data selector. */
2235 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2236 {
2237 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2239 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2240 return VINF_SUCCESS;
2241 }
2242
2243 /* Fetch the descriptor. */
2244 IEMSELDESC Desc;
2245 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2249 VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 /* Must be a data segment or readable code segment. */
2254 if ( !Desc.Legacy.Gen.u1DescType
2255 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2256 {
2257 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2258 Desc.Legacy.Gen.u4Type));
2259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* Check privileges for data segments and non-conforming code segments. */
2263 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2264 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2265 {
2266 /* The RPL and the new CPL must be less than or equal to the DPL. */
2267 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2268 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2269 {
2270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2271 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2273 }
2274 }
2275
2276 /* Is it there? */
2277 if (!Desc.Legacy.Gen.u1Present)
2278 {
2279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282
2283 /* The base and limit. */
2284 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2285 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2286
2287 /*
2288 * Ok, everything checked out fine. Now set the accessed bit before
2289 * committing the result into the registers.
2290 */
2291 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2292 {
2293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2297 }
2298
2299 /* Commit */
2300 pSReg->Sel = uSel;
2301 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2302 pSReg->u32Limit = cbLimit;
2303 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2304 pSReg->ValidSel = uSel;
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2307 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2308
2309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2311 return VINF_SUCCESS;
2312}
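
/* Check order above: a bad type or privilege raises #TS with the selector
   (sans RPL) as error code, a clear present bit raises #NP, and only then
   is the accessed bit set and the hidden register state committed. */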
2313
2314
2315/**
2316 * Performs a task switch.
2317 *
2318 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2319 * caller is responsible for performing the necessary checks (like DPL, TSS
2320 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2321 * reference for JMP, CALL, IRET.
2322 *
2323 * If the task switch is due to a software interrupt or hardware exception,
2324 * the caller is responsible for validating the TSS selector and descriptor. See
2325 * Intel Instruction reference for INT n.
2326 *
2327 * @returns VBox strict status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @param enmTaskSwitch The cause of the task switch.
2330 * @param uNextEip The EIP effective after the task switch.
2331 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2332 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2333 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2334 * @param SelTss The TSS selector of the new task.
2335 * @param pNewDescTss Pointer to the new TSS descriptor.
2336 */
2337VBOXSTRICTRC
2338iemTaskSwitch(PVMCPUCC pVCpu,
2339 IEMTASKSWITCH enmTaskSwitch,
2340 uint32_t uNextEip,
2341 uint32_t fFlags,
2342 uint16_t uErr,
2343 uint64_t uCr2,
2344 RTSEL SelTss,
2345 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2346{
2347 Assert(!IEM_IS_REAL_MODE(pVCpu));
2348 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350
2351 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2352 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2353 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2354 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2356
2357 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2358 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2359
2360 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2361 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2362
2363 /* Update CR2 in case it's a page-fault. */
2364 /** @todo This should probably be done much earlier in IEM/PGM. See
2365 * @bugref{5653#c49}. */
2366 if (fFlags & IEM_XCPT_FLAGS_CR2)
2367 pVCpu->cpum.GstCtx.cr2 = uCr2;
2368
2369 /*
2370 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2371 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2372 */
2373 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2374 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2375 if (uNewTssLimit < uNewTssLimitMin)
2376 {
2377 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2378 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2380 }
2381
2382 /*
2383 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2384 * The new TSS must have been read and validated (DPL, limits etc.) before a
2385 * task-switch VM-exit commences.
2386 *
2387 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2388 */
2389 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2390 {
2391 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2392 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2393 }
2394
2395 /*
2396 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2397 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2398 */
2399 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2400 {
2401 uint64_t const uExitInfo1 = SelTss;
2402 uint64_t uExitInfo2 = uErr;
2403 switch (enmTaskSwitch)
2404 {
2405 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2406 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2407 default: break;
2408 }
2409 if (fFlags & IEM_XCPT_FLAGS_ERR)
2410 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2411 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2413
2414 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2415 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2416 RT_NOREF2(uExitInfo1, uExitInfo2);
2417 }
2418
2419 /*
2420 * Check the current TSS limit. The last written byte to the current TSS during the
2421 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2422 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2423 *
2424 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2425 * end up with smaller than "legal" TSS limits.
2426 */
2427 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2428 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2429 if (uCurTssLimit < uCurTssLimitMin)
2430 {
2431 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2432 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2434 }
2435
2436 /*
2437 * Verify that the new TSS can be accessed and map it. Map only the required contents
2438 * and not the entire TSS.
2439 */
2440 uint8_t bUnmapInfoNewTss;
2441 void *pvNewTss;
2442 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2443 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2444 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2445 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2446 * not perform correct translation if this happens. See Intel spec. 7.2.1
2447 * "Task-State Segment". */
2448 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2449/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2450 * Consider wrapping the remainder into a function for simpler cleanup. */
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2454 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2455 return rcStrict;
2456 }
2457
2458 /*
2459 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2460 */
2461 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2462 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2463 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2464 {
2465 uint8_t bUnmapInfoDescCurTss;
2466 PX86DESC pDescCurTss;
2467 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2468 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2472 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475
2476 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484
2485 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2486 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2487 {
2488 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2489 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2490 fEFlags &= ~X86_EFL_NT;
2491 }
2492 }
2493
2494 /*
2495 * Save the CPU state into the current TSS.
2496 */
2497 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2498 if (GCPtrNewTss == GCPtrCurTss)
2499 {
2500 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2501 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2502 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2503 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2504 pVCpu->cpum.GstCtx.ldtr.Sel));
2505 }
2506 if (fIsNewTss386)
2507 {
2508 /*
2509 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2510 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2511 */
2512 uint8_t bUnmapInfoCurTss32;
2513 void *pvCurTss32;
2514 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2515 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2516 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2517 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2518 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2522 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525
2526 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..offCurTss + cbCurTss). */
2527 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2528 pCurTss32->eip = uNextEip;
2529 pCurTss32->eflags = fEFlags;
2530 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2531 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2532 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2533 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2534 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2535 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2536 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2537 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2538 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2539 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2540 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2541 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2542 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2543 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2544
2545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2546 if (rcStrict != VINF_SUCCESS)
2547 {
2548 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2549 VBOXSTRICTRC_VAL(rcStrict)));
2550 return rcStrict;
2551 }
2552 }
2553 else
2554 {
2555 /*
2556 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2557 */
2558 uint8_t bUnmapInfoCurTss16;
2559 void *pvCurTss16;
2560 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2561 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2562 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2563 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2564 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2568 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571
2572 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..offCurTss + cbCurTss). */
2573 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2574 pCurTss16->ip = uNextEip;
2575 pCurTss16->flags = (uint16_t)fEFlags;
2576 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2577 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2578 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2579 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2580 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2581 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2582 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2583 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2584 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2585 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2586 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2587 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2588
2589 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2590 if (rcStrict != VINF_SUCCESS)
2591 {
2592 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2593 VBOXSTRICTRC_VAL(rcStrict)));
2594 return rcStrict;
2595 }
2596 }
2597
2598 /*
2599 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2600 */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2605 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2606 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2607 }
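 /* The backlink written above is what a later IRET with EFLAGS.NT set will
    use to find and return to this (now outgoing) task. */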
2608
2609 /*
2610 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2611 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2612 */
2613 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2614 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2615 bool fNewDebugTrap;
2616 if (fIsNewTss386)
2617 {
2618 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2619 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2620 uNewEip = pNewTss32->eip;
2621 uNewEflags = pNewTss32->eflags;
2622 uNewEax = pNewTss32->eax;
2623 uNewEcx = pNewTss32->ecx;
2624 uNewEdx = pNewTss32->edx;
2625 uNewEbx = pNewTss32->ebx;
2626 uNewEsp = pNewTss32->esp;
2627 uNewEbp = pNewTss32->ebp;
2628 uNewEsi = pNewTss32->esi;
2629 uNewEdi = pNewTss32->edi;
2630 uNewES = pNewTss32->es;
2631 uNewCS = pNewTss32->cs;
2632 uNewSS = pNewTss32->ss;
2633 uNewDS = pNewTss32->ds;
2634 uNewFS = pNewTss32->fs;
2635 uNewGS = pNewTss32->gs;
2636 uNewLdt = pNewTss32->selLdt;
2637 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2638 }
2639 else
2640 {
2641 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2642 uNewCr3 = 0;
2643 uNewEip = pNewTss16->ip;
2644 uNewEflags = pNewTss16->flags;
2645 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2646 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2647 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2648 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2649 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2650 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2651 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2652 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2653 uNewES = pNewTss16->es;
2654 uNewCS = pNewTss16->cs;
2655 uNewSS = pNewTss16->ss;
2656 uNewDS = pNewTss16->ds;
2657 uNewFS = 0;
2658 uNewGS = 0;
2659 uNewLdt = pNewTss16->selLdt;
2660 fNewDebugTrap = false;
2661 }
2662
2663 if (GCPtrNewTss == GCPtrCurTss)
2664 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2665 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2666
2667 /*
2668 * We're done accessing the new TSS.
2669 */
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /*
2678 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2679 */
2680 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2681 {
2682 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2683 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2684 if (rcStrict != VINF_SUCCESS)
2685 {
2686 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2687 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2688 return rcStrict;
2689 }
2690
2691 /* Check that the descriptor indicates the new TSS is available (not busy). */
2692 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2693 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2694 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2695
2696 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2697 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2701 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705
2706 /*
2707 * From this point on, we're technically in the new task. We will defer exceptions
2708 * until the completion of the task switch but before executing any instructions in the new task.
2709 */
2710 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2711 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2712 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2714 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2715 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2716 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2717
2718 /* Set the busy bit in TR. */
2719 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2720
2721 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2722 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2723 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2724 {
2725 uNewEflags |= X86_EFL_NT;
2726 }
2727
2728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2729 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2731
2732 pVCpu->cpum.GstCtx.eip = uNewEip;
2733 pVCpu->cpum.GstCtx.eax = uNewEax;
2734 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2735 pVCpu->cpum.GstCtx.edx = uNewEdx;
2736 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2737 pVCpu->cpum.GstCtx.esp = uNewEsp;
2738 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2739 pVCpu->cpum.GstCtx.esi = uNewEsi;
2740 pVCpu->cpum.GstCtx.edi = uNewEdi;
2741
2742 uNewEflags &= X86_EFL_LIVE_MASK;
2743 uNewEflags |= X86_EFL_RA1_MASK;
2744 IEMMISC_SET_EFL(pVCpu, uNewEflags);
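 /* X86_EFL_LIVE_MASK strips bits that are reserved in EFLAGS from the TSS
    image and X86_EFL_RA1_MASK sets the always-one bit (bit 1) before the
    flags are committed. */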
2745
2746 /*
2747 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2748 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2749 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2750 */
2751 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2752 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2753
2754 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2755 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2756
2757 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2758 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2759
2760 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2761 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2762
2763 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2764 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2765
2766 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2767 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2769
2770 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2771 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2774
2775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2776 {
2777 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2778 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2779 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2784 }
2785
2786 /*
2787 * Switch CR3 for the new task.
2788 */
2789 if ( fIsNewTss386
2790 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2791 {
2792 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2793 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2794 AssertRCSuccessReturn(rc, rc);
2795
2796 /* Inform PGM. */
2797 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2798 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2799 AssertRCReturn(rc, rc);
2800 /* ignore informational status codes */
2801
2802 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2803 }
2804
2805 /*
2806 * Switch LDTR for the new task.
2807 */
2808 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2809 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2810 else
2811 {
2812 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2813
2814 IEMSELDESC DescNewLdt;
2815 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2819 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822 if ( !DescNewLdt.Legacy.Gen.u1Present
2823 || DescNewLdt.Legacy.Gen.u1DescType
2824 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2825 {
2826 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2827 uNewLdt, DescNewLdt.Legacy.u));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2832 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2833 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2834 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2835 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2836 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2839 }
2840
2841 IEMSELDESC DescSS;
2842 if (IEM_IS_V86_MODE(pVCpu))
2843 {
2844 IEM_SET_CPL(pVCpu, 3);
2845 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2846 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2851
2852 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2853 DescSS.Legacy.u = 0;
2854 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2855 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2856 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2857 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2858 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2859 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2860 DescSS.Legacy.Gen.u2Dpl = 3;
2861 }
2862 else
2863 {
2864 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2865
2866 /*
2867 * Load the stack segment for the new task.
2868 */
2869 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2870 {
2871 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2872 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 /* Fetch the descriptor. */
2876 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2877 if (rcStrict != VINF_SUCCESS)
2878 {
2879 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2880 VBOXSTRICTRC_VAL(rcStrict)));
2881 return rcStrict;
2882 }
2883
2884 /* SS must be a data segment and writable. */
2885 if ( !DescSS.Legacy.Gen.u1DescType
2886 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2887 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2888 {
2889 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2890 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2895 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2896 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2897 {
2898 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2899 uNewCpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Is it there? */
2904 if (!DescSS.Legacy.Gen.u1Present)
2905 {
2906 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2907 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2911 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2912
2913 /* Set the accessed bit before committing the result into SS. */
2914 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2915 {
2916 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2920 }
2921
2922 /* Commit SS. */
2923 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2924 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2925 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2926 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2927 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2928 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2929 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2930
2931 /* CPL has changed, update IEM before loading rest of segments. */
2932 IEM_SET_CPL(pVCpu, uNewCpl);
2933
2934 /*
2935 * Load the data segments for the new task.
2936 */
2937 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2941 if (rcStrict != VINF_SUCCESS)
2942 return rcStrict;
2943 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2947 if (rcStrict != VINF_SUCCESS)
2948 return rcStrict;
2949
2950 /*
2951 * Load the code segment for the new task.
2952 */
2953 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2954 {
2955 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2957 }
2958
2959 /* Fetch the descriptor. */
2960 IEMSELDESC DescCS;
2961 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2962 if (rcStrict != VINF_SUCCESS)
2963 {
2964 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2965 return rcStrict;
2966 }
2967
2968 /* CS must be a code segment. */
2969 if ( !DescCS.Legacy.Gen.u1DescType
2970 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2971 {
2972 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2973 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2974 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2975 }
2976
2977 /* For conforming CS, DPL must be less than or equal to the RPL. */
2978 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2979 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2980 {
2981 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2982 DescCS.Legacy.Gen.u2Dpl));
2983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 /* For non-conforming CS, DPL must match RPL. */
2987 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2988 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2989 {
2990 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2991 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 /* Is it there? */
2996 if (!DescCS.Legacy.Gen.u1Present)
2997 {
2998 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2999 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3003 u64Base = X86DESC_BASE(&DescCS.Legacy);
3004
3005 /* Set the accessed bit before committing the result into CS. */
3006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3007 {
3008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3009 if (rcStrict != VINF_SUCCESS)
3010 return rcStrict;
3011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3012 }
3013
3014 /* Commit CS. */
3015 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3016 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3017 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3018 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3019 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3020 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3022 }
3023
3024 /* Make sure the CPU mode is correct. */
3025 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3026 if (fExecNew != pVCpu->iem.s.fExec)
3027 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3028 pVCpu->iem.s.fExec = fExecNew;
3029
3030 /** @todo Debug trap. */
3031 if (fIsNewTss386 && fNewDebugTrap)
3032 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3033
3034 /*
3035 * Construct the error code masks based on what caused this task switch.
3036 * See Intel Instruction reference for INT.
3037 */
3038 uint16_t uExt;
3039 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3040 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3041 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3042 uExt = 1;
3043 else
3044 uExt = 0;
3045
3046 /*
3047 * Push any error code onto the new stack.
3048 */
3049 if (fFlags & IEM_XCPT_FLAGS_ERR)
3050 {
3051 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3052 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
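/* Note: the error code is pushed as a dword on a 386 TSS stack and as a word on a 286 TSS stack, hence the 4 vs. 2 byte frame size here and the U32/U16 push below. */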
3053 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3054
3055 /* Check that there is sufficient space on the stack. */
3056 /** @todo Factor out segment limit checking for normal/expand down segments
3057 * into a separate function. */
3058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3059 {
3060 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3061 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3062 {
3063 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3065 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3066 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3067 }
3068 }
3069 else
3070 {
3071 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3072 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3073 {
3074 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3075 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3076 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3077 }
3078 }
3079
3080
3081 if (fIsNewTss386)
3082 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3083 else
3084 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3085 if (rcStrict != VINF_SUCCESS)
3086 {
3087 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3088 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090 }
3091 }
3092
3093 /* Check the new EIP against the new CS limit. */
3094 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3095 {
3096 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3097 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3098 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3099 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3100 }
3101
3102 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3103 pVCpu->cpum.GstCtx.ss.Sel));
3104 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3105}
3106
3107
3108/**
3109 * Implements exceptions and interrupts for protected mode.
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param cbInstr The number of bytes to offset rIP by in the return
3114 * address.
3115 * @param u8Vector The interrupt / exception vector number.
3116 * @param fFlags The flags.
3117 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3118 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3119 */
3120static VBOXSTRICTRC
3121iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3122 uint8_t cbInstr,
3123 uint8_t u8Vector,
3124 uint32_t fFlags,
3125 uint16_t uErr,
3126 uint64_t uCr2) RT_NOEXCEPT
3127{
3128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3129
3130 /*
3131 * Read the IDT entry.
3132 */
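/* Protected-mode IDT entries are 8 bytes each, so the whole entry for u8Vector (bytes vector*8 .. vector*8+7) must lie within the IDT limit. */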
3133 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3136 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
3138 X86DESC Idte;
3139 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3140 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3141 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3142 {
3143 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3144 return rcStrict;
3145 }
3146 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3147 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3148 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3149 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3150
3151 /*
3152 * Check the descriptor type, DPL and such.
3153 * ASSUMES this is done in the same order as described for call-gate calls.
3154 */
3155 if (Idte.Gate.u1DescType)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3159 }
3160 bool fTaskGate = false;
3161 uint8_t f32BitGate = true;
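/* Note: f32BitGate doubles as a shift count for the stack frame sizes further down (0 for 16-bit gates, 1 for 32-bit ones). */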
3162 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3163 switch (Idte.Gate.u4Type)
3164 {
3165 case X86_SEL_TYPE_SYS_UNDEFINED:
3166 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3167 case X86_SEL_TYPE_SYS_LDT:
3168 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3169 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3170 case X86_SEL_TYPE_SYS_UNDEFINED2:
3171 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3172 case X86_SEL_TYPE_SYS_UNDEFINED3:
3173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3174 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3175 case X86_SEL_TYPE_SYS_UNDEFINED4:
3176 {
3177 /** @todo check what actually happens when the type is wrong...
3178 * esp. call gates. */
3179 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3180 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182
3183 case X86_SEL_TYPE_SYS_286_INT_GATE:
3184 f32BitGate = false;
3185 RT_FALL_THRU();
3186 case X86_SEL_TYPE_SYS_386_INT_GATE:
3187 fEflToClear |= X86_EFL_IF;
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_TASK_GATE:
3191 fTaskGate = true;
3192#ifndef IEM_IMPLEMENTS_TASKSWITCH
3193 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3194#endif
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3198 f32BitGate = false;
3199 break;
3200 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3201 break;
3202
3203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3204 }
3205
3206 /* Check DPL against CPL if applicable. */
3207 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3208 {
3209 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3213 }
3214 }
3215
3216 /* Is it there? */
3217 if (!Idte.Gate.u1Present)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3220 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 /* Is it a task-gate? */
3224 if (fTaskGate)
3225 {
3226 /*
3227 * Construct the error code masks based on what caused this task switch.
3228 * See Intel Instruction reference for INT.
3229 */
3230 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3231 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3232 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3233 RTSEL SelTss = Idte.Gate.u16Sel;
3234
3235 /*
3236 * Fetch the TSS descriptor in the GDT.
3237 */
3238 IEMSELDESC DescTSS;
3239 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3240 if (rcStrict != VINF_SUCCESS)
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3243 VBOXSTRICTRC_VAL(rcStrict)));
3244 return rcStrict;
3245 }
3246
3247 /* The TSS descriptor must be a system segment and be available (not busy). */
3248 if ( DescTSS.Legacy.Gen.u1DescType
3249 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3250 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3253 u8Vector, SelTss, DescTSS.Legacy.au64));
3254 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3255 }
3256
3257 /* The TSS must be present. */
3258 if (!DescTSS.Legacy.Gen.u1Present)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* Do the actual task switch. */
3265 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3266 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3267 fFlags, uErr, uCr2, SelTss, &DescTSS);
3268 }
3269
3270 /* A null CS is bad. */
3271 RTSEL NewCS = Idte.Gate.u16Sel;
3272 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3275 return iemRaiseGeneralProtectionFault0(pVCpu);
3276 }
3277
3278 /* Fetch the descriptor for the new CS. */
3279 IEMSELDESC DescCS;
3280 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3281 if (rcStrict != VINF_SUCCESS)
3282 {
3283 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3284 return rcStrict;
3285 }
3286
3287 /* Must be a code segment. */
3288 if (!DescCS.Legacy.Gen.u1DescType)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3291 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3292 }
3293 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3294 {
3295 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3296 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3297 }
3298
3299 /* Don't allow lowering the privilege level. */
3300 /** @todo Does the lowering of privileges apply to software interrupts
3301 * only? This has bearings on the more-privileged or
3302 * same-privilege stack behavior further down. A testcase would
3303 * be nice. */
3304 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3307 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3308 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3309 }
3310
3311 /* Make sure the selector is present. */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3316 }
3317
3318#ifdef LOG_ENABLED
3319 /* If software interrupt, try decode it if logging is enabled and such. */
3320 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3321 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3322 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3323#endif
3324
3325 /* Check the new EIP against the new CS limit. */
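/* 286 gates only supply a 16-bit entry offset; 386 gates combine the low and high offset words into a full 32-bit EIP. */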
3326 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3327 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3328 ? Idte.Gate.u16OffsetLow
3329 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3330 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3331 if (uNewEip > cbLimitCS)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3334 u8Vector, uNewEip, cbLimitCS, NewCS));
3335 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3336 }
3337 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3338
3339 /* Calc the flag image to push. */
3340 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3341 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3342 fEfl &= ~X86_EFL_RF;
3343 else
3344 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3345
3346 /* From V8086 mode only go to CPL 0. */
3347 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3348 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3349 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3350 {
3351 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3352 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3353 }
3354
3355 /*
3356 * If the privilege level changes, we need to get a new stack from the TSS.
3357 * This in turn means validating the new SS and ESP...
3358 */
3359 if (uNewCpl != IEM_GET_CPL(pVCpu))
3360 {
3361 RTSEL NewSS;
3362 uint32_t uNewEsp;
3363 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3364 if (rcStrict != VINF_SUCCESS)
3365 return rcStrict;
3366
3367 IEMSELDESC DescSS;
3368 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3369 if (rcStrict != VINF_SUCCESS)
3370 return rcStrict;
3371 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3372 if (!DescSS.Legacy.Gen.u1DefBig)
3373 {
3374 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3375 uNewEsp = (uint16_t)uNewEsp;
3376 }
3377
3378 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3379
3380 /* Check that there is sufficient space for the stack frame. */
3381 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
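/* Frame size: EIP, CS, EFLAGS, ESP and SS (5 slots), plus an error code slot if any; a V86 exit additionally saves ES, DS, FS and GS. Slots are 2 bytes for 16-bit gates and doubled for 32-bit ones. */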
3382 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3383 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3384 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3385
3386 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3387 {
3388 if ( uNewEsp - 1 > cbLimitSS
3389 || uNewEsp < cbStackFrame)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3392 u8Vector, NewSS, uNewEsp, cbStackFrame));
3393 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3394 }
3395 }
3396 else
3397 {
3398 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3399 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3400 {
3401 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3402 u8Vector, NewSS, uNewEsp, cbStackFrame));
3403 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3404 }
3405 }
3406
3407 /*
3408 * Start making changes.
3409 */
3410
3411 /* Set the new CPL so that stack accesses use it. */
3412 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3413 IEM_SET_CPL(pVCpu, uNewCpl);
3414
3415 /* Create the stack frame. */
3416 uint8_t bUnmapInfoStackFrame;
3417 RTPTRUNION uStackFrame;
3418 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3419 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3420 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
3423 if (f32BitGate)
3424 {
3425 if (fFlags & IEM_XCPT_FLAGS_ERR)
3426 *uStackFrame.pu32++ = uErr;
3427 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3428 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3429 uStackFrame.pu32[2] = fEfl;
3430 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3431 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3432 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3436 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3437 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3438 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3439 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3440 }
3441 }
3442 else
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu16++ = uErr;
3446 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3447 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3448 uStackFrame.pu16[2] = fEfl;
3449 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3450 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3451 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3455 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3456 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3457 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3458 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3459 }
3460 }
3461 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3462 if (rcStrict != VINF_SUCCESS)
3463 return rcStrict;
3464
3465 /* Mark the selectors 'accessed' (hope this is the correct time). */
3466 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3467 * after pushing the stack frame? (Write protect the gdt + stack to
3468 * find out.) */
3469 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3470 {
3471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3475 }
3476
3477 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3478 {
3479 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3483 }
3484
3485 /*
3486 * Start committing the register changes (joins with the DPL=CPL branch).
3487 */
3488 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3489 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3491 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3492 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3493 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3494 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3495 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3496 * SP is loaded).
3497 * Need to check the other combinations too:
3498 * - 16-bit TSS, 32-bit handler
3499 * - 32-bit TSS, 16-bit handler */
3500 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3501 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3502 else
3503 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3504
3505 if (fEfl & X86_EFL_VM)
3506 {
3507 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3508 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3510 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3511 }
3512 }
3513 /*
3514 * Same privilege, no stack change and smaller stack frame.
3515 */
3516 else
3517 {
3518 uint64_t uNewRsp;
3519 uint8_t bUnmapInfoStackFrame;
3520 RTPTRUNION uStackFrame;
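/* Same-privilege frame: EIP, CS and EFLAGS (3 slots), plus an error code slot if any; 2 bytes per slot for 16-bit gates, doubled for 32-bit ones. */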
3521 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3522 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3523 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 if (f32BitGate)
3528 {
3529 if (fFlags & IEM_XCPT_FLAGS_ERR)
3530 *uStackFrame.pu32++ = uErr;
3531 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3532 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3533 uStackFrame.pu32[2] = fEfl;
3534 }
3535 else
3536 {
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu16++ = uErr;
3539 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3540 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3541 uStackFrame.pu16[2] = fEfl;
3542 }
3543 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546
3547 /* Mark the CS selector as 'accessed'. */
3548 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3549 {
3550 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3554 }
3555
3556 /*
3557 * Start committing the register changes (joins with the other branch).
3558 */
3559 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3560 }
3561
3562 /* ... register committing continues. */
3563 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3564 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3565 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3566 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3567 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3569
3570 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3571 fEfl &= ~fEflToClear;
3572 IEMMISC_SET_EFL(pVCpu, fEfl);
3573
3574 if (fFlags & IEM_XCPT_FLAGS_CR2)
3575 pVCpu->cpum.GstCtx.cr2 = uCr2;
3576
3577 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3578 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3579
3580 /* Make sure the execution flags are correct. */
3581 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3582 if (fExecNew != pVCpu->iem.s.fExec)
3583 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3584 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3585 pVCpu->iem.s.fExec = fExecNew;
3586 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3587
3588 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3589}
3590
3591
3592/**
3593 * Implements exceptions and interrupts for long mode.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param cbInstr The number of bytes to offset rIP by in the return
3598 * address.
3599 * @param u8Vector The interrupt / exception vector number.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 */
3604static VBOXSTRICTRC
3605iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3606 uint8_t cbInstr,
3607 uint8_t u8Vector,
3608 uint32_t fFlags,
3609 uint16_t uErr,
3610 uint64_t uCr2) RT_NOEXCEPT
3611{
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614 /*
3615 * Read the IDT entry.
3616 */
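/* Long-mode IDT entries are 16 bytes each, so the entry for u8Vector starts at vector * 16. */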
3617 uint16_t offIdt = (uint16_t)u8Vector << 4;
3618 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3621 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3622 }
3623 X86DESC64 Idte;
3624#ifdef _MSC_VER /* Shut up silly compiler warning. */
3625 Idte.au64[0] = 0;
3626 Idte.au64[1] = 0;
3627#endif
3628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3630 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3631 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3632 {
3633 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3637 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3638 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3639
3640 /*
3641 * Check the descriptor type, DPL and such.
3642 * ASSUMES this is done in the same order as described for call-gate calls.
3643 */
3644 if (Idte.Gate.u1DescType)
3645 {
3646 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3647 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3648 }
3649 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3650 switch (Idte.Gate.u4Type)
3651 {
3652 case AMD64_SEL_TYPE_SYS_INT_GATE:
3653 fEflToClear |= X86_EFL_IF;
3654 break;
3655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3656 break;
3657
3658 default:
3659 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3660 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3661 }
3662
3663 /* Check DPL against CPL if applicable. */
3664 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3665 {
3666 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3667 {
3668 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3669 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3670 }
3671 }
3672
3673 /* Is it there? */
3674 if (!Idte.Gate.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3677 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3678 }
3679
3680 /* A null CS is bad. */
3681 RTSEL NewCS = Idte.Gate.u16Sel;
3682 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3685 return iemRaiseGeneralProtectionFault0(pVCpu);
3686 }
3687
3688 /* Fetch the descriptor for the new CS. */
3689 IEMSELDESC DescCS;
3690 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3691 if (rcStrict != VINF_SUCCESS)
3692 {
3693 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3694 return rcStrict;
3695 }
3696
3697 /* Must be a 64-bit code segment. */
3698 if (!DescCS.Long.Gen.u1DescType)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703 if ( !DescCS.Long.Gen.u1Long
3704 || DescCS.Long.Gen.u1DefBig
3705 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3708 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3709 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3710 }
3711
3712 /* Don't allow lowering the privilege level. For non-conforming CS
3713 selectors, the CS.DPL sets the privilege level the trap/interrupt
3714 handler runs at. For conforming CS selectors, the CPL remains
3715 unchanged, but the CS.DPL must be <= CPL. */
3716 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3717 * when CPU in Ring-0. Result \#GP? */
3718 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3721 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3722 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3723 }
3724
3725
3726 /* Make sure the selector is present. */
3727 if (!DescCS.Legacy.Gen.u1Present)
3728 {
3729 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3730 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3731 }
3732
3733 /* Check that the new RIP is canonical. */
3734 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3735 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3736 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3737 if (!IEM_IS_CANONICAL(uNewRip))
3738 {
3739 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3740 return iemRaiseGeneralProtectionFault0(pVCpu);
3741 }
3742
3743 /*
3744 * If the privilege level changes or if the IST isn't zero, we need to get
3745 * a new stack from the TSS.
3746 */
3747 uint64_t uNewRsp;
3748 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3749 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3750 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3751 || Idte.Gate.u3IST != 0)
3752 {
3753 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3754 if (rcStrict != VINF_SUCCESS)
3755 return rcStrict;
3756 }
3757 else
3758 uNewRsp = pVCpu->cpum.GstCtx.rsp;
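/* In 64-bit mode the CPU aligns the new stack pointer down to a 16-byte boundary before pushing the frame. */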
3759 uNewRsp &= ~(uint64_t)0xf;
3760
3761 /*
3762 * Calc the flag image to push.
3763 */
3764 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3765 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3766 fEfl &= ~X86_EFL_RF;
3767 else
3768 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3769
3770 /*
3771 * Start making changes.
3772 */
3773 /* Set the new CPL so that stack accesses use it. */
3774 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3775 IEM_SET_CPL(pVCpu, uNewCpl);
3776/** @todo Setting CPL this early seems wrong as it would affect any errors we
3777 * raise accessing the stack and (?) GDT/LDT... */
3778
3779 /* Create the stack frame. */
3780 uint8_t bUnmapInfoStackFrame;
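/* The 64-bit frame is five qwords (RIP, CS, RFLAGS, RSP, SS), plus one more when an error code is pushed. */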
3781 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3782 RTPTRUNION uStackFrame;
3783 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3784 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 if (fFlags & IEM_XCPT_FLAGS_ERR)
3789 *uStackFrame.pu64++ = uErr;
3790 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3791 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3792 uStackFrame.pu64[2] = fEfl;
3793 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3794 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3795 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3796 if (rcStrict != VINF_SUCCESS)
3797 return rcStrict;
3798
3799 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3800 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3801 * after pushing the stack frame? (Write protect the gdt + stack to
3802 * find out.) */
3803 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3804 {
3805 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3806 if (rcStrict != VINF_SUCCESS)
3807 return rcStrict;
3808 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3809 }
3810
3811 /*
3812 * Start committing the register changes.
3813 */
3814 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3815 * hidden registers when interrupting 32-bit or 16-bit code! */
3816 if (uNewCpl != uOldCpl)
3817 {
3818 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3819 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3820 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3822 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3823 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3824 }
3825 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3826 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3827 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3828 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3829 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3830 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3831 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3832 pVCpu->cpum.GstCtx.rip = uNewRip;
3833
3834 fEfl &= ~fEflToClear;
3835 IEMMISC_SET_EFL(pVCpu, fEfl);
3836
3837 if (fFlags & IEM_XCPT_FLAGS_CR2)
3838 pVCpu->cpum.GstCtx.cr2 = uCr2;
3839
3840 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3841 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3842
3843 iemRecalcExecModeAndCplFlags(pVCpu);
3844
3845 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Implements exceptions and interrupts.
3851 *
3852 * All exceptions and interrupts go through this function!
3853 *
3854 * @returns VBox strict status code.
3855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3856 * @param cbInstr The number of bytes to offset rIP by in the return
3857 * address.
3858 * @param u8Vector The interrupt / exception vector number.
3859 * @param fFlags The flags.
3860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3862 */
3863VBOXSTRICTRC
3864iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3865 uint8_t cbInstr,
3866 uint8_t u8Vector,
3867 uint32_t fFlags,
3868 uint16_t uErr,
3869 uint64_t uCr2) RT_NOEXCEPT
3870{
3871 /*
3872 * Get all the state that we might need here.
3873 */
3874 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3876
3877#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3878 /*
3879 * Flush prefetch buffer
3880 */
3881 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3882#endif
3883
3884 /*
3885 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3886 */
3887 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3888 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3889 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3890 | IEM_XCPT_FLAGS_BP_INSTR
3891 | IEM_XCPT_FLAGS_ICEBP_INSTR
3892 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3893 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3894 {
3895 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3897 u8Vector = X86_XCPT_GP;
3898 uErr = 0;
3899 }
3900
3901 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3902#ifdef DBGFTRACE_ENABLED
3903 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3904 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3906#endif
3907
3908 /*
3909 * Check if DBGF wants to intercept the exception.
3910 */
3911 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3912 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3913 { /* likely */ }
3914 else
3915 {
3916 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3917 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3918 if (rcStrict != VINF_SUCCESS)
3919 return rcStrict;
3920 }
3921
3922 /*
3923 * Evaluate whether NMI blocking should be in effect.
3924 * Normally, NMI blocking is in effect whenever we inject an NMI.
3925 */
3926 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3927 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3928
3929#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3930 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3931 {
3932 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3933 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3934 return rcStrict0;
3935
3936 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3937 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3938 {
3939 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3940 fBlockNmi = false;
3941 }
3942 }
3943#endif
3944
3945#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3946 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3947 {
3948 /*
3949 * If the event is being injected as part of VMRUN, it isn't subject to event
3950 * intercepts in the nested-guest. However, secondary exceptions that occur
3951 * during injection of any event -are- subject to exception intercepts.
3952 *
3953 * See AMD spec. 15.20 "Event Injection".
3954 */
3955 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3956 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3957 else
3958 {
3959 /*
3960 * Check and handle if the event being raised is intercepted.
3961 */
3962 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3963 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3964 return rcStrict0;
3965 }
3966 }
3967#endif
3968
3969 /*
3970 * Set NMI blocking if necessary.
3971 */
3972 if (fBlockNmi)
3973 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3974
3975 /*
3976 * Do recursion accounting.
3977 */
3978 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3979 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3980 if (pVCpu->iem.s.cXcptRecursions == 0)
3981 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3982 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3983 else
3984 {
3985 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3986 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3987 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3988
3989 if (pVCpu->iem.s.cXcptRecursions >= 4)
3990 {
3991#ifdef DEBUG_bird
3992 AssertFailed();
3993#endif
3994 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3995 }
3996
3997 /*
3998 * Evaluate the sequence of recurring events.
3999 */
4000 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4001 NULL /* pXcptRaiseInfo */);
4002 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4003 { /* likely */ }
4004 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4005 {
4006 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4007 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4008 u8Vector = X86_XCPT_DF;
4009 uErr = 0;
4010#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4011 /* VMX nested-guest #DF intercept needs to be checked here. */
4012 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4013 {
4014 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4015 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4016 return rcStrict0;
4017 }
4018#endif
4019 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4020 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4021 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4022 }
4023 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4024 {
4025 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4026 return iemInitiateCpuShutdown(pVCpu);
4027 }
4028 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4029 {
4030 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4031 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4032 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4033 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4034 return VERR_EM_GUEST_CPU_HANG;
4035 }
4036 else
4037 {
4038 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4039 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4040 return VERR_IEM_IPE_9;
4041 }
4042
4043 /*
4044 * The 'EXT' bit is set when an exception occurs during delivery of an external
4045 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4046 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4047 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4048 *
4049 * [1] - Intel spec. 6.13 "Error Code"
4050 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4051 * [3] - Intel Instruction reference for INT n.
4052 */
4053 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4054 && (fFlags & IEM_XCPT_FLAGS_ERR)
4055 && u8Vector != X86_XCPT_PF
4056 && u8Vector != X86_XCPT_DF)
4057 {
4058 uErr |= X86_TRAP_ERR_EXTERNAL;
4059 }
4060 }
4061
4062 pVCpu->iem.s.cXcptRecursions++;
4063 pVCpu->iem.s.uCurXcpt = u8Vector;
4064 pVCpu->iem.s.fCurXcpt = fFlags;
4065 pVCpu->iem.s.uCurXcptErr = uErr;
4066 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4067
4068 /*
4069 * Extensive logging.
4070 */
4071#if defined(LOG_ENABLED) && defined(IN_RING3)
4072 if (LogIs3Enabled())
4073 {
4074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4075 char szRegs[4096];
4076 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4077 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4078 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4079 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4080 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4081 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4082 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4083 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4084 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4085 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4086 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4087 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4088 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4089 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4090 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4091 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4092 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4093 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4094 " efer=%016VR{efer}\n"
4095 " pat=%016VR{pat}\n"
4096 " sf_mask=%016VR{sf_mask}\n"
4097 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4098 " lstar=%016VR{lstar}\n"
4099 " star=%016VR{star} cstar=%016VR{cstar}\n"
4100 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4101 );
4102
4103 char szInstr[256];
4104 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4105 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4106 szInstr, sizeof(szInstr), NULL);
4107 Log3(("%s%s\n", szRegs, szInstr));
4108 }
4109#endif /* LOG_ENABLED */
4110
4111 /*
4112 * Stats.
4113 */
4114 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4115 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4116 else if (u8Vector <= X86_XCPT_LAST)
4117 {
4118 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4119 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4120 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4121 }
4122
4123 /*
4124 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4125 * to ensure that a stale TLB or paging cache entry will only cause one
4126 * spurious #PF.
4127 */
4128 if ( u8Vector == X86_XCPT_PF
4129 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4130 IEMTlbInvalidatePage(pVCpu, uCr2);
4131
4132 /*
4133 * Call the mode specific worker function.
4134 */
4135 VBOXSTRICTRC rcStrict;
4136 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4137 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4138 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4139 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4140 else
4141 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4142
4143 /* Flush the prefetch buffer. */
4144 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4145
4146 /*
4147 * Unwind.
4148 */
4149 pVCpu->iem.s.cXcptRecursions--;
4150 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4151 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4152 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4153 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4154 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4155 return rcStrict;
4156}
4157
4158#ifdef IEM_WITH_SETJMP
4159/**
4160 * See iemRaiseXcptOrInt. Will not return.
4161 */
4162DECL_NO_RETURN(void)
4163iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4164 uint8_t cbInstr,
4165 uint8_t u8Vector,
4166 uint32_t fFlags,
4167 uint16_t uErr,
4168 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4169{
4170 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4171 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4172}
4173#endif
4174
4175
4176/** \#DE - 00. */
4177VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4178{
4179 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4180}
4181
4182
4183/** \#DB - 01.
4184 * @note This automatically clears DR7.GD. */
4185VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4186{
4187 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4188 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4189 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4190}
4191
4192
4193/** \#BR - 05. */
4194VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4195{
4196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4197}
4198
4199
4200/** \#UD - 06. */
4201VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4204}
4205
4206
4207/** \#NM - 07. */
4208VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4209{
4210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4211}
4212
4213
4214/** \#TS(err) - 0a. */
4215VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4218}
4219
4220
4221/** \#TS(tr) - 0a. */
4222VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4223{
4224 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4225 pVCpu->cpum.GstCtx.tr.Sel, 0);
4226}
4227
4228
4229/** \#TS(0) - 0a. */
4230VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4233 0, 0);
4234}
4235
4236
4237/** \#TS(err) - 0a. */
4238VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4239{
4240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4241 uSel & X86_SEL_MASK_OFF_RPL, 0);
4242}
4243
4244
4245/** \#NP(err) - 0b. */
4246VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4249}
4250
4251
4252/** \#NP(sel) - 0b. */
4253VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4254{
4255 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4256 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4257 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4258 uSel & ~X86_SEL_RPL, 0);
4259}
4260
4261
4262/** \#SS(seg) - 0c. */
4263VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4264{
4265 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4266 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4267 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4268 uSel & ~X86_SEL_RPL, 0);
4269}
4270
4271
4272/** \#SS(err) - 0c. */
4273VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4274{
4275 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4276 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4277 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4278}
4279
4280
4281/** \#GP(n) - 0d. */
4282VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4283{
4284 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4286}
4287
4288
4289/** \#GP(0) - 0d. */
4290VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4291{
4292 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4293 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4294}
4295
4296#ifdef IEM_WITH_SETJMP
4297/** \#GP(0) - 0d. */
4298DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4299{
4300 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4301 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4302}
4303#endif
4304
4305
4306/** \#GP(sel) - 0d. */
4307VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4308{
4309 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4310 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4311 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4312 Sel & ~X86_SEL_RPL, 0);
4313}
4314
4315
4316/** \#GP(0) - 0d. */
4317VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4318{
4319 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4320 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4321}
4322
4323
4324/** \#GP(sel) - 0d. */
4325VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4326{
4327 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4328 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4329 NOREF(iSegReg); NOREF(fAccess);
4330 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4331 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4332}
4333
4334#ifdef IEM_WITH_SETJMP
4335/** \#GP(sel) - 0d, longjmp. */
4336DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4337{
4338 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4339 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4340 NOREF(iSegReg); NOREF(fAccess);
4341 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4342 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4343}
4344#endif
4345
4346/** \#GP(sel) - 0d. */
4347VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4348{
4349 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4350 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4351 NOREF(Sel);
4352 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4353}
4354
4355#ifdef IEM_WITH_SETJMP
4356/** \#GP(sel) - 0d, longjmp. */
4357DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4358{
4359 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4360 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4361 NOREF(Sel);
4362 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4363}
4364#endif
4365
4366
4367/** \#GP(sel) - 0d. */
4368VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4369{
4370 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4371 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4372 NOREF(iSegReg); NOREF(fAccess);
4373 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4374}
4375
4376#ifdef IEM_WITH_SETJMP
4377/** \#GP(sel) - 0d, longjmp. */
4378DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4379{
4380 NOREF(iSegReg); NOREF(fAccess);
4381 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4382}
4383#endif
4384
4385
4386/** \#PF(n) - 0e. */
4387VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4388{
4389 uint16_t uErr;
4390 switch (rc)
4391 {
4392 case VERR_PAGE_NOT_PRESENT:
4393 case VERR_PAGE_TABLE_NOT_PRESENT:
4394 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4395 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4396 uErr = 0;
4397 break;
4398
4399 default:
4400 AssertMsgFailed(("%Rrc\n", rc));
4401 RT_FALL_THRU();
4402 case VERR_ACCESS_DENIED:
4403 uErr = X86_TRAP_PF_P;
4404 break;
4405
4406 /** @todo reserved */
4407 }
4408
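/* The U/S bit in the #PF error code is set for user-mode (CPL 3) accesses. */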
4409 if (IEM_GET_CPL(pVCpu) == 3)
4410 uErr |= X86_TRAP_PF_US;
4411
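/* The I/D (instruction fetch) bit is only reported for code fetches, and only when NX is enabled (CR4.PAE and EFER.NXE). */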
4412 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4413 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4414 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4415 uErr |= X86_TRAP_PF_ID;
4416
4417#if 0 /* This is so much non-sense, really. Why was it done like that? */
4418 /* Note! RW access callers reporting a WRITE protection fault will clear
4419 the READ flag before calling. So, read-modify-write accesses (RW)
4420 can safely be reported as READ faults. */
4421 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4422 uErr |= X86_TRAP_PF_RW;
4423#else
4424 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4425 {
4426 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4427 /// (regardless of outcome of the comparison in the latter case).
4428 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4429 uErr |= X86_TRAP_PF_RW;
4430 }
4431#endif
4432
4433 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4434 of the memory operand rather than at the start of it. (Not sure what
4435 happens if it crosses a page boundary.) The current heuristic for
4436 this is to report the #PF for the last byte if the access is more than
4437 64 bytes. This is probably not correct, but we can work that out later;
4438 the main objective now is to get FXSAVE to work like real hardware and
4439 make bs3-cpu-basic2 work. */
4440 if (cbAccess <= 64)
4441 { /* likely */ }
4442 else
4443 GCPtrWhere += cbAccess - 1;
4444
4445 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4446 uErr, GCPtrWhere);
4447}
4448
4449#ifdef IEM_WITH_SETJMP
4450/** \#PF(n) - 0e, longjmp. */
4451DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4452 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4453{
4454 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4455}
4456#endif
4457
4458
4459/** \#MF(0) - 10. */
4460VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4461{
4462 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4463 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4464
4465 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4466 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4467 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4468}
4469
4470
4471/** \#AC(0) - 11. */
4472VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4473{
4474 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4475}
4476
4477#ifdef IEM_WITH_SETJMP
4478/** \#AC(0) - 11, longjmp. */
4479DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4480{
4481 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4482}
4483#endif
4484
4485
4486/** \#XF(0)/\#XM(0) - 19. */
4487VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4488{
4489 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4490}
4491
4492
4493/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4494IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4495{
4496 NOREF(cbInstr);
4497 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4498}
4499
4500
4501/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4502IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4503{
4504 NOREF(cbInstr);
4505 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4506}
4507
4508
4509/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4510IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4511{
4512 NOREF(cbInstr);
4513 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4514}
4515
4516
4517/** @} */
4518
4519/** @name Common opcode decoders.
4520 * @{
4521 */
4522//#include <iprt/mem.h>
4523
4524/**
4525 * Used to add extra details about a stub case.
4526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4527 */
4528void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4529{
4530#if defined(LOG_ENABLED) && defined(IN_RING3)
4531 PVM pVM = pVCpu->CTX_SUFF(pVM);
4532 char szRegs[4096];
4533 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4534 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4535 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4536 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4537 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4538 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4539 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4540 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4541 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4542 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4543 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4544 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4545 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4546 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4547 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4548 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4549 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4550 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4551 " efer=%016VR{efer}\n"
4552 " pat=%016VR{pat}\n"
4553 " sf_mask=%016VR{sf_mask}\n"
4554 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4555 " lstar=%016VR{lstar}\n"
4556 " star=%016VR{star} cstar=%016VR{cstar}\n"
4557 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4558 );
4559
4560 char szInstr[256];
4561 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4562 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4563 szInstr, sizeof(szInstr), NULL);
4564
4565 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4566#else
4567 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4568#endif
4569}
4570
4571/** @} */
4572
4573
4574
4575/** @name Register Access.
4576 * @{
4577 */
4578
4579/**
4580 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4581 *
4582 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4583 * segment limit.
4584 *
4585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4586 * @param cbInstr Instruction size.
4587 * @param offNextInstr The offset of the next instruction.
4588 * @param enmEffOpSize Effective operand size.
4589 */
4590VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4591 IEMMODE enmEffOpSize) RT_NOEXCEPT
4592{
4593 switch (enmEffOpSize)
4594 {
4595 case IEMMODE_16BIT:
4596 {
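/* Note! With a 16-bit operand size the target wraps at 64K, since the addition
   is done in a uint16_t; only the resulting IP is checked against the limit. */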
4597 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4598 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4599 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4600 pVCpu->cpum.GstCtx.rip = uNewIp;
4601 else
4602 return iemRaiseGeneralProtectionFault0(pVCpu);
4603 break;
4604 }
4605
4606 case IEMMODE_32BIT:
4607 {
4608 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4609 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4610
4611 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4612 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4613 pVCpu->cpum.GstCtx.rip = uNewEip;
4614 else
4615 return iemRaiseGeneralProtectionFault0(pVCpu);
4616 break;
4617 }
4618
4619 case IEMMODE_64BIT:
4620 {
4621 Assert(IEM_IS_64BIT_CODE(pVCpu));
4622
4623 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4624 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4625 pVCpu->cpum.GstCtx.rip = uNewRip;
4626 else
4627 return iemRaiseGeneralProtectionFault0(pVCpu);
4628 break;
4629 }
4630
4631 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4632 }
4633
4634#ifndef IEM_WITH_CODE_TLB
4635 /* Flush the prefetch buffer. */
4636 pVCpu->iem.s.cbOpcode = cbInstr;
4637#endif
4638
4639 /*
4640 * Clear RF and finish the instruction (maybe raise #DB).
4641 */
4642 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4643}
4644
4645
4646/**
4647 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4648 *
4649 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4650 * segment limit.
4651 *
4652 * @returns Strict VBox status code.
4653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4654 * @param cbInstr Instruction size.
4655 * @param offNextInstr The offset of the next instruction.
4656 */
4657VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4658{
4659 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4660
4661 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4662 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4663 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4664 pVCpu->cpum.GstCtx.rip = uNewIp;
4665 else
4666 return iemRaiseGeneralProtectionFault0(pVCpu);
4667
4668#ifndef IEM_WITH_CODE_TLB
4669 /* Flush the prefetch buffer. */
4670 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4671#endif
4672
4673 /*
4674 * Clear RF and finish the instruction (maybe raise #DB).
4675 */
4676 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4677}
4678
4679
4680/**
4681 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4682 *
4683 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4684 * segment limit.
4685 *
4686 * @returns Strict VBox status code.
4687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4688 * @param cbInstr Instruction size.
4689 * @param offNextInstr The offset of the next instruction.
4690 * @param enmEffOpSize Effective operand size.
4691 */
4692VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4693 IEMMODE enmEffOpSize) RT_NOEXCEPT
4694{
4695 if (enmEffOpSize == IEMMODE_32BIT)
4696 {
4697 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4698
4699 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4700 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4701 pVCpu->cpum.GstCtx.rip = uNewEip;
4702 else
4703 return iemRaiseGeneralProtectionFault0(pVCpu);
4704 }
4705 else
4706 {
4707 Assert(enmEffOpSize == IEMMODE_64BIT);
4708
4709 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4710 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4711 pVCpu->cpum.GstCtx.rip = uNewRip;
4712 else
4713 return iemRaiseGeneralProtectionFault0(pVCpu);
4714 }
4715
4716#ifndef IEM_WITH_CODE_TLB
4717 /* Flush the prefetch buffer. */
4718 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4719#endif
4720
4721 /*
4722 * Clear RF and finish the instruction (maybe raise #DB).
4723 */
4724 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4725}
4726
4727/** @} */
4728
4729
4730/** @name FPU access and helpers.
4731 *
4732 * @{
4733 */
4734
4735/**
4736 * Updates the x87.DS and FPUDP registers.
4737 *
4738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4739 * @param pFpuCtx The FPU context.
4740 * @param iEffSeg The effective segment register.
4741 * @param GCPtrEff The effective address relative to @a iEffSeg.
4742 */
4743DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4744{
4745 RTSEL sel;
4746 switch (iEffSeg)
4747 {
4748 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4749 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4750 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4751 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4752 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4753 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4754 default:
4755 AssertMsgFailed(("%d\n", iEffSeg));
4756 sel = pVCpu->cpum.GstCtx.ds.Sel;
4757 }
4758 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
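/* The format stored here depends on the mode: real/V86 mode stores the linear
   address (selector * 16 + offset) with DS cleared, protected mode stores the
   selector plus the 32-bit offset, and long mode stores the full 64-bit offset
   (the 64-bit store also covers the DS field in the FXSAVE image). */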
4759 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4760 {
4761 pFpuCtx->DS = 0;
4762 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4763 }
4764 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4765 {
4766 pFpuCtx->DS = sel;
4767 pFpuCtx->FPUDP = GCPtrEff;
4768 }
4769 else
4770 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4771}
4772
4773
4774/**
4775 * Rotates the stack registers in the push direction.
4776 *
4777 * @param pFpuCtx The FPU context.
4778 * @remarks This is a complete waste of time, but fxsave stores the registers in
4779 * stack order.
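 *          aRegs[x] corresponds to ST(x), so a push shifts every entry up one
 *          slot with wrap-around (the value the caller placed in aRegs[7] ends
 *          up as the new ST(0)).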
4780 */
4781DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4782{
4783 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4784 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4785 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4786 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4787 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4788 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4789 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4790 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4791 pFpuCtx->aRegs[0].r80 = r80Tmp;
4792}
4793
4794
4795/**
4796 * Rotates the stack registers in the pop direction.
4797 *
4798 * @param pFpuCtx The FPU context.
4799 * @remarks This is a complete waste of time, but fxsave stores the registers in
4800 * stack order.
4801 */
4802DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4803{
4804 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4805 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4806 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4807 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4808 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4809 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4810 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4811 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4812 pFpuCtx->aRegs[7].r80 = r80Tmp;
4813}
4814
4815
4816/**
4817 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4818 * exception prevents it.
4819 *
4820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4821 * @param pResult The FPU operation result to push.
4822 * @param pFpuCtx The FPU context.
4823 */
4824static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4825{
4826 /* Update FSW and bail if there are pending exceptions afterwards. */
4827 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4828 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4829 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4830 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4831 {
4832 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4833 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4834 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4835 pFpuCtx->FSW = fFsw;
4836 return;
4837 }
4838
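/* A push decrements TOP; adding 7 in the 3-bit TOP field is the same as subtracting 1 modulo 8. */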
4839 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4840 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4841 {
4842 /* All is fine, push the actual value. */
4843 pFpuCtx->FTW |= RT_BIT(iNewTop);
4844 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4845 }
4846 else if (pFpuCtx->FCW & X86_FCW_IM)
4847 {
4848 /* Masked stack overflow, push QNaN. */
4849 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4850 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4851 }
4852 else
4853 {
4854 /* Raise stack overflow, don't push anything. */
4855 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4856 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4857 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4858 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4859 return;
4860 }
4861
4862 fFsw &= ~X86_FSW_TOP_MASK;
4863 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4864 pFpuCtx->FSW = fFsw;
4865
4866 iemFpuRotateStackPush(pFpuCtx);
4867 RT_NOREF(pVCpu);
4868}
4869
4870
4871/**
4872 * Stores a result in a FPU register and updates the FSW and FTW.
4873 *
4874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4875 * @param pFpuCtx The FPU context.
4876 * @param pResult The result to store.
4877 * @param iStReg Which FPU register to store it in.
4878 */
4879static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4880{
4881 Assert(iStReg < 8);
4882 uint16_t fNewFsw = pFpuCtx->FSW;
4883 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4884 fNewFsw &= ~X86_FSW_C_MASK;
4885 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4886 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4887 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4888 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4889 pFpuCtx->FSW = fNewFsw;
4890 pFpuCtx->FTW |= RT_BIT(iReg);
4891 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4892 RT_NOREF(pVCpu);
4893}
4894
4895
4896/**
4897 * Only updates the FPU status word (FSW) with the result of the current
4898 * instruction.
4899 *
4900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4901 * @param pFpuCtx The FPU context.
4902 * @param u16FSW The FSW output of the current instruction.
4903 */
4904static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4905{
4906 uint16_t fNewFsw = pFpuCtx->FSW;
4907 fNewFsw &= ~X86_FSW_C_MASK;
4908 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4909 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4910 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4911 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4912 pFpuCtx->FSW = fNewFsw;
4913 RT_NOREF(pVCpu);
4914}
4915
4916
4917/**
4918 * Pops one item off the FPU stack if no pending exception prevents it.
4919 *
4920 * @param pFpuCtx The FPU context.
4921 */
4922static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4923{
4924 /* Check pending exceptions. */
4925 uint16_t uFSW = pFpuCtx->FSW;
4926 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4927 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4928 return;
4929
4930 /* Pop: advance TOP by one (adding 9 to the 3-bit TOP field equals +1 modulo 8). */
4931 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4932 uFSW &= ~X86_FSW_TOP_MASK;
4933 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4934 pFpuCtx->FSW = uFSW;
4935
4936 /* Mark the previous ST0 as empty. */
4937 iOldTop >>= X86_FSW_TOP_SHIFT;
4938 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4939
4940 /* Rotate the registers. */
4941 iemFpuRotateStackPop(pFpuCtx);
4942}
4943
4944
4945/**
4946 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4947 *
4948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4949 * @param pResult The FPU operation result to push.
4950 * @param uFpuOpcode The FPU opcode value.
4951 */
4952void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4953{
4954 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4955 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4956 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4957}
4958
4959
4960/**
4961 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4962 * and sets FPUDP and FPUDS.
4963 *
4964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4965 * @param pResult The FPU operation result to push.
4966 * @param iEffSeg The effective segment register.
4967 * @param GCPtrEff The effective address relative to @a iEffSeg.
4968 * @param uFpuOpcode The FPU opcode value.
4969 */
4970void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4971 uint16_t uFpuOpcode) RT_NOEXCEPT
4972{
4973 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4974 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4975 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4976 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4977}
4978
4979
4980/**
4981 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4982 * unless a pending exception prevents it.
4983 *
4984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4985 * @param pResult The FPU operation result to store and push.
4986 * @param uFpuOpcode The FPU opcode value.
4987 */
4988void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4989{
4990 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4991 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4992
4993 /* Update FSW and bail if there are pending exceptions afterwards. */
4994 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4995 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4996 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4997 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4998 {
4999 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5000 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5001 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5002 pFpuCtx->FSW = fFsw;
5003 return;
5004 }
5005
5006 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5007 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5008 {
5009 /* All is fine, push the actual value. */
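/* (The first result replaces the current ST(0); the second goes into aRegs[7]
   and becomes the new ST(0) once the stack is rotated below.) */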
5010 pFpuCtx->FTW |= RT_BIT(iNewTop);
5011 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5012 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5013 }
5014 else if (pFpuCtx->FCW & X86_FCW_IM)
5015 {
5016 /* Masked stack overflow, push QNaN. */
5017 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5019 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5020 }
5021 else
5022 {
5023 /* Raise stack overflow, don't push anything. */
5024 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5025 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5026 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5027 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5028 return;
5029 }
5030
5031 fFsw &= ~X86_FSW_TOP_MASK;
5032 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5033 pFpuCtx->FSW = fFsw;
5034
5035 iemFpuRotateStackPush(pFpuCtx);
5036}
5037
5038
5039/**
5040 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5041 * FOP.
5042 *
5043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5044 * @param pResult The result to store.
5045 * @param iStReg Which FPU register to store it in.
5046 * @param uFpuOpcode The FPU opcode value.
5047 */
5048void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5049{
5050 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5051 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5052 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5053}
5054
5055
5056/**
5057 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5058 * FOP, and then pops the stack.
5059 *
5060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5061 * @param pResult The result to store.
5062 * @param iStReg Which FPU register to store it in.
5063 * @param uFpuOpcode The FPU opcode value.
5064 */
5065void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5066{
5067 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5068 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5069 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5070 iemFpuMaybePopOne(pFpuCtx);
5071}
5072
5073
5074/**
5075 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5076 * FPUDP, and FPUDS.
5077 *
5078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5079 * @param pResult The result to store.
5080 * @param iStReg Which FPU register to store it in.
5081 * @param iEffSeg The effective memory operand selector register.
5082 * @param GCPtrEff The effective memory operand offset.
5083 * @param uFpuOpcode The FPU opcode value.
5084 */
5085void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5086 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5087{
5088 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5089 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5090 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5091 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5092}
5093
5094
5095/**
5096 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5097 * FPUDP, and FPUDS, and then pops the stack.
5098 *
5099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5100 * @param pResult The result to store.
5101 * @param iStReg Which FPU register to store it in.
5102 * @param iEffSeg The effective memory operand selector register.
5103 * @param GCPtrEff The effective memory operand offset.
5104 * @param uFpuOpcode The FPU opcode value.
5105 */
5106void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5107 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5108{
5109 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5110 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5111 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5112 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5113 iemFpuMaybePopOne(pFpuCtx);
5114}
5115
5116
5117/**
5118 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5119 *
5120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5121 * @param uFpuOpcode The FPU opcode value.
5122 */
5123void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5124{
5125 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5126 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5127}
5128
5129
5130/**
5131 * Updates the FSW, FOP, FPUIP, and FPUCS.
5132 *
5133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5134 * @param u16FSW The FSW from the current instruction.
5135 * @param uFpuOpcode The FPU opcode value.
5136 */
5137void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5138{
5139 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5140 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5141 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5142}
5143
5144
5145/**
5146 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5147 *
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param u16FSW The FSW from the current instruction.
5150 * @param uFpuOpcode The FPU opcode value.
5151 */
5152void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5153{
5154 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5155 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5156 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5157 iemFpuMaybePopOne(pFpuCtx);
5158}
5159
5160
5161/**
5162 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5163 *
5164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5165 * @param u16FSW The FSW from the current instruction.
5166 * @param iEffSeg The effective memory operand selector register.
5167 * @param GCPtrEff The effective memory operand offset.
5168 * @param uFpuOpcode The FPU opcode value.
5169 */
5170void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5171{
5172 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5173 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5174 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5175 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5176}
5177
5178
5179/**
5180 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5181 *
5182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5183 * @param u16FSW The FSW from the current instruction.
5184 * @param uFpuOpcode The FPU opcode value.
5185 */
5186void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5187{
5188 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5189 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5190 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5191 iemFpuMaybePopOne(pFpuCtx);
5192 iemFpuMaybePopOne(pFpuCtx);
5193}
5194
5195
5196/**
5197 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5198 *
5199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5200 * @param u16FSW The FSW from the current instruction.
5201 * @param iEffSeg The effective memory operand selector register.
5202 * @param GCPtrEff The effective memory operand offset.
5203 * @param uFpuOpcode The FPU opcode value.
5204 */
5205void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5206 uint16_t uFpuOpcode) RT_NOEXCEPT
5207{
5208 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5209 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5210 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5211 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5212 iemFpuMaybePopOne(pFpuCtx);
5213}
5214
5215
5216/**
5217 * Worker routine for raising an FPU stack underflow exception.
5218 *
5219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5220 * @param pFpuCtx The FPU context.
5221 * @param iStReg The stack register being accessed.
5222 */
5223static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5224{
5225 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5226 if (pFpuCtx->FCW & X86_FCW_IM)
5227 {
5228 /* Masked underflow. */
5229 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5230 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5231 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5232 if (iStReg != UINT8_MAX)
5233 {
5234 pFpuCtx->FTW |= RT_BIT(iReg);
5235 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5236 }
5237 }
5238 else
5239 {
5240 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5241 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5242 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5243 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5244 }
5245 RT_NOREF(pVCpu);
5246}
5247
5248
5249/**
5250 * Raises a FPU stack underflow exception.
5251 *
5252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5253 * @param iStReg The destination register that should be loaded
5254 * with QNaN if \#IS is not masked. Specify
5255 * UINT8_MAX if none (like for fcom).
5256 * @param uFpuOpcode The FPU opcode value.
5257 */
5258void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5259{
5260 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5261 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5262 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5263}
5264
5265
5266void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5267{
5268 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5269 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5270 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5271 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5272}
5273
5274
5275void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5276{
5277 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5278 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5279 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5280 iemFpuMaybePopOne(pFpuCtx);
5281}
5282
5283
5284void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5285 uint16_t uFpuOpcode) RT_NOEXCEPT
5286{
5287 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5288 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5289 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5290 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5291 iemFpuMaybePopOne(pFpuCtx);
5292}
5293
5294
5295void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5296{
5297 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5298 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5299 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5300 iemFpuMaybePopOne(pFpuCtx);
5301 iemFpuMaybePopOne(pFpuCtx);
5302}
5303
5304
5305void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5306{
5307 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5308 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5309
5310 if (pFpuCtx->FCW & X86_FCW_IM)
5311 {
5312 /* Masked underflow - Push QNaN. */
5313 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5314 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5315 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5316 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5317 pFpuCtx->FTW |= RT_BIT(iNewTop);
5318 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5319 iemFpuRotateStackPush(pFpuCtx);
5320 }
5321 else
5322 {
5323 /* Exception pending - don't change TOP or the register stack. */
5324 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5325 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5326 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5327 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5328 }
5329}
5330
5331
5332void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5333{
5334 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5335 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5336
5337 if (pFpuCtx->FCW & X86_FCW_IM)
5338 {
5339 /* Masked underflow - Push QNaN. */
5340 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5341 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5342 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5343 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5344 pFpuCtx->FTW |= RT_BIT(iNewTop);
5345 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5346 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5347 iemFpuRotateStackPush(pFpuCtx);
5348 }
5349 else
5350 {
5351 /* Exception pending - don't change TOP or the register stack. */
5352 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5353 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5354 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5355 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5356 }
5357}
5358
5359
5360/**
5361 * Worker routine for raising an FPU stack overflow exception on a push.
5362 *
5363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5364 * @param pFpuCtx The FPU context.
5365 */
5366static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5367{
5368 if (pFpuCtx->FCW & X86_FCW_IM)
5369 {
5370 /* Masked overflow. */
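/* (C1 set marks this as a stack overflow; it is left clear for underflow.) */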
5371 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5372 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5373 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5374 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5375 pFpuCtx->FTW |= RT_BIT(iNewTop);
5376 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5377 iemFpuRotateStackPush(pFpuCtx);
5378 }
5379 else
5380 {
5381 /* Exception pending - don't change TOP or the register stack. */
5382 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5383 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5384 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5385 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5386 }
5387 RT_NOREF(pVCpu);
5388}
5389
5390
5391/**
5392 * Raises a FPU stack overflow exception on a push.
5393 *
5394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5395 * @param uFpuOpcode The FPU opcode value.
5396 */
5397void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5398{
5399 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5400 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5401 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5402}
5403
5404
5405/**
5406 * Raises a FPU stack overflow exception on a push with a memory operand.
5407 *
5408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5409 * @param iEffSeg The effective memory operand selector register.
5410 * @param GCPtrEff The effective memory operand offset.
5411 * @param uFpuOpcode The FPU opcode value.
5412 */
5413void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5414{
5415 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5416 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5417 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5418 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5419}
5420
5421/** @} */
5422
5423
5424/** @name SSE+AVX SIMD access and helpers.
5425 *
5426 * @{
5427 */
5428/**
5429 * Stores a result in a SIMD XMM register, updates the MXCSR.
5430 *
5431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5432 * @param pResult The result to store.
5433 * @param iXmmReg Which SIMD XMM register to store the result in.
5434 */
5435void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5436{
5437 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5438 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5439
5440 /* The result is only updated if there is no unmasked exception pending. */
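/* (Shifting the mask bits right by X86_MXCSR_XCPT_MASK_SHIFT lines them up with
   the exception flag bits, so "unmasked and pending" is a single AND test.) */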
5441 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5442 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5443 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5444}
5445
5446
5447/**
5448 * Updates the MXCSR.
5449 *
5450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5451 * @param fMxcsr The new MXCSR value.
5452 */
5453void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5454{
5455 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5456 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5457}
5458/** @} */
5459
5460
5461/** @name Memory access.
5462 *
5463 * @{
5464 */
5465
5466#undef LOG_GROUP
5467#define LOG_GROUP LOG_GROUP_IEM_MEM
5468
5469/**
5470 * Updates the IEMCPU::cbWritten counter if applicable.
5471 *
5472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5473 * @param fAccess The access being accounted for.
5474 * @param cbMem The access size.
5475 */
5476DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5477{
5478 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5479 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5480 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5481}
5482
5483
5484/**
5485 * Applies the segment limit, base and attributes.
5486 *
5487 * This may raise a \#GP or \#SS.
5488 *
5489 * @returns VBox strict status code.
5490 *
5491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5492 * @param fAccess The kind of access which is being performed.
5493 * @param iSegReg The index of the segment register to apply.
5494 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5495 * TSS, ++).
5496 * @param cbMem The access size.
5497 * @param pGCPtrMem Pointer to the guest memory address to apply
5498 * segmentation to. Input and output parameter.
5499 */
5500VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5501{
5502 if (iSegReg == UINT8_MAX)
5503 return VINF_SUCCESS;
5504
5505 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5506 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5507 switch (IEM_GET_CPU_MODE(pVCpu))
5508 {
5509 case IEMMODE_16BIT:
5510 case IEMMODE_32BIT:
5511 {
5512 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5513 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5514
5515 if ( pSel->Attr.n.u1Present
5516 && !pSel->Attr.n.u1Unusable)
5517 {
5518 Assert(pSel->Attr.n.u1DescType);
5519 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5520 {
5521 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5522 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5523 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5524
5525 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5526 {
5527 /** @todo CPL check. */
5528 }
5529
5530 /*
5531 * There are two kinds of data selectors, normal and expand down.
5532 */
5533 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5534 {
5535 if ( GCPtrFirst32 > pSel->u32Limit
5536 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5537 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5538 }
5539 else
5540 {
5541 /*
5542 * The upper boundary is defined by the B bit, not the G bit!
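 * For expand-down segments the valid offsets are (limit, 0xffff], or
 * (limit, 0xffffffff] when B is set, which is what the check below enforces.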
5543 */
5544 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5545 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5546 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5547 }
5548 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5549 }
5550 else
5551 {
5552 /*
5553 * A code selector can usually be used to read through; writing is
5554 * only permitted in real and V8086 mode.
5555 */
5556 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5557 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5558 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5559 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5560 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5561
5562 if ( GCPtrFirst32 > pSel->u32Limit
5563 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5564 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5565
5566 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5567 {
5568 /** @todo CPL check. */
5569 }
5570
5571 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5572 }
5573 }
5574 else
5575 return iemRaiseGeneralProtectionFault0(pVCpu);
5576 return VINF_SUCCESS;
5577 }
5578
5579 case IEMMODE_64BIT:
5580 {
5581 RTGCPTR GCPtrMem = *pGCPtrMem;
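/* In 64-bit mode only FS and GS can have a non-zero base; the other segment
   bases are treated as zero and the limit is not checked. */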
5582 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5583 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5584
5585 Assert(cbMem >= 1);
5586 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5587 return VINF_SUCCESS;
5588 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5589 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5590 return iemRaiseGeneralProtectionFault0(pVCpu);
5591 }
5592
5593 default:
5594 AssertFailedReturn(VERR_IEM_IPE_7);
5595 }
5596}
5597
5598
5599/**
5600 * Translates a virtual address to a physical address and checks if we
5601 * can access the page as specified.
5602 *
5603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5604 * @param GCPtrMem The virtual address.
5605 * @param cbAccess The access size, for raising \#PF correctly for
5606 * FXSAVE and such.
5607 * @param fAccess The intended access.
5608 * @param pGCPhysMem Where to return the physical address.
5609 */
5610VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5611 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5612{
5613 /** @todo Need a different PGM interface here. We're currently using
5614 * generic / REM interfaces. This won't cut it for R0. */
5615 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5616 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5617 * here. */
5618 PGMPTWALK Walk;
5619 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5620 if (RT_FAILURE(rc))
5621 {
5622 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5623 /** @todo Check unassigned memory in unpaged mode. */
5624 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5625#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5626 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5627 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5628#endif
5629 *pGCPhysMem = NIL_RTGCPHYS;
5630 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5631 }
5632
5633 /* If the page is user-accessible, writable and does not have the no-exec bit
5634 set, all access is allowed. Otherwise we'll have to check more carefully... */
5635 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5636 {
5637 /* Write to read only memory? */
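/* (Supervisor writes to read-only pages only fault when CR0.WP is set;
   CPL 3 writes, unless flagged as system accesses, always do.) */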
5638 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5639 && !(Walk.fEffective & X86_PTE_RW)
5640 && ( ( IEM_GET_CPL(pVCpu) == 3
5641 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5642 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5643 {
5644 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5645 *pGCPhysMem = NIL_RTGCPHYS;
5646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5647 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5648 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5649#endif
5650 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5651 }
5652
5653 /* Kernel memory accessed by userland? */
5654 if ( !(Walk.fEffective & X86_PTE_US)
5655 && IEM_GET_CPL(pVCpu) == 3
5656 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5657 {
5658 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5659 *pGCPhysMem = NIL_RTGCPHYS;
5660#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5661 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5662 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5663#endif
5664 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5665 }
5666
5667 /* Executing non-executable memory? */
5668 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5669 && (Walk.fEffective & X86_PTE_PAE_NX)
5670 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5671 {
5672 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5673 *pGCPhysMem = NIL_RTGCPHYS;
5674#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5675 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5676 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5677#endif
5678 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5679 VERR_ACCESS_DENIED);
5680 }
5681 }
5682
5683 /*
5684 * Set the dirty / access flags.
5685 * ASSUMES this is set when the address is translated rather than on commit...
5686 */
5687 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5688 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5689 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5690 {
5691 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5692 AssertRC(rc2);
5693 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5694 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5695 }
5696
5697 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5698 *pGCPhysMem = GCPhys;
5699 return VINF_SUCCESS;
5700}
5701
5702#if 0 /*unused*/
5703/**
5704 * Looks up a memory mapping entry.
5705 *
5706 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5708 * @param pvMem The memory address.
5709 * @param fAccess The access to.
5710 */
5711DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5712{
5713 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5714 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5715 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5716 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5717 return 0;
5718 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5719 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5720 return 1;
5721 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5722 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5723 return 2;
5724 return VERR_NOT_FOUND;
5725}
5726#endif
5727
5728/**
5729 * Finds a free memmap entry when using iNextMapping doesn't work.
5730 *
5731 * @returns Memory mapping index, 1024 on failure.
5732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5733 */
5734static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5735{
5736 /*
5737 * The easy case.
5738 */
5739 if (pVCpu->iem.s.cActiveMappings == 0)
5740 {
5741 pVCpu->iem.s.iNextMapping = 1;
5742 return 0;
5743 }
5744
5745 /* There should be enough mappings for all instructions. */
5746 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5747
5748 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5749 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5750 return i;
5751
5752 AssertFailedReturn(1024);
5753}
5754
5755
5756/**
5757 * Commits a bounce buffer that needs writing back and unmaps it.
5758 *
5759 * @returns Strict VBox status code.
5760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5761 * @param iMemMap The index of the buffer to commit.
5762 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5763 * Always false in ring-3, obviously.
5764 */
5765static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5766{
5767 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5768 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5769#ifdef IN_RING3
5770 Assert(!fPostponeFail);
5771 RT_NOREF_PV(fPostponeFail);
5772#endif
5773
5774 /*
5775 * Do the writing.
5776 */
5777 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5778 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5779 {
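/* A bounce buffer may span two physical pages: cbFirst/GCPhysFirst cover the
   part in the first page and cbSecond/GCPhysSecond the remainder. */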
5780 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5781 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5782 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5783 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5784 {
5785 /*
5786 * Carefully and efficiently dealing with access handler return
5787 * codes makes this a little bloated.
5788 */
5789 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5790 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5791 pbBuf,
5792 cbFirst,
5793 PGMACCESSORIGIN_IEM);
5794 if (rcStrict == VINF_SUCCESS)
5795 {
5796 if (cbSecond)
5797 {
5798 rcStrict = PGMPhysWrite(pVM,
5799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5800 pbBuf + cbFirst,
5801 cbSecond,
5802 PGMACCESSORIGIN_IEM);
5803 if (rcStrict == VINF_SUCCESS)
5804 { /* nothing */ }
5805 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5806 {
5807 LogEx(LOG_GROUP_IEM,
5808 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5811 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5812 }
5813#ifndef IN_RING3
5814 else if (fPostponeFail)
5815 {
5816 LogEx(LOG_GROUP_IEM,
5817 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5820 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5821 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5822 return iemSetPassUpStatus(pVCpu, rcStrict);
5823 }
5824#endif
5825 else
5826 {
5827 LogEx(LOG_GROUP_IEM,
5828 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5831 return rcStrict;
5832 }
5833 }
5834 }
5835 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5836 {
5837 if (!cbSecond)
5838 {
5839 LogEx(LOG_GROUP_IEM,
5840 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5842 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5843 }
5844 else
5845 {
5846 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5848 pbBuf + cbFirst,
5849 cbSecond,
5850 PGMACCESSORIGIN_IEM);
5851 if (rcStrict2 == VINF_SUCCESS)
5852 {
5853 LogEx(LOG_GROUP_IEM,
5854 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5857 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5858 }
5859 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5860 {
5861 LogEx(LOG_GROUP_IEM,
5862 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5865 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5866 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5867 }
5868#ifndef IN_RING3
5869 else if (fPostponeFail)
5870 {
5871 LogEx(LOG_GROUP_IEM,
5872 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5876 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5877 return iemSetPassUpStatus(pVCpu, rcStrict);
5878 }
5879#endif
5880 else
5881 {
5882 LogEx(LOG_GROUP_IEM,
5883 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5885 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5886 return rcStrict2;
5887 }
5888 }
5889 }
5890#ifndef IN_RING3
5891 else if (fPostponeFail)
5892 {
5893 LogEx(LOG_GROUP_IEM,
5894 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5897 if (!cbSecond)
5898 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5899 else
5900 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5901 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5902 return iemSetPassUpStatus(pVCpu, rcStrict);
5903 }
5904#endif
5905 else
5906 {
5907 LogEx(LOG_GROUP_IEM,
5908 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5911 return rcStrict;
5912 }
5913 }
5914 else
5915 {
5916 /*
5917 * No access handlers, much simpler.
5918 */
5919 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5920 if (RT_SUCCESS(rc))
5921 {
5922 if (cbSecond)
5923 {
5924 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5925 if (RT_SUCCESS(rc))
5926 { /* likely */ }
5927 else
5928 {
5929 LogEx(LOG_GROUP_IEM,
5930 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5933 return rc;
5934 }
5935 }
5936 }
5937 else
5938 {
5939 LogEx(LOG_GROUP_IEM,
5940 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5943 return rc;
5944 }
5945 }
5946 }
5947
5948#if defined(IEM_LOG_MEMORY_WRITES)
5949 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5950 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5951 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5952 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5953 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5954 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5955
5956 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5957 g_cbIemWrote = cbWrote;
5958 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5959#endif
5960
5961 /*
5962 * Free the mapping entry.
5963 */
5964 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5965 Assert(pVCpu->iem.s.cActiveMappings != 0);
5966 pVCpu->iem.s.cActiveMappings--;
5967 return VINF_SUCCESS;
5968}
5969
5970
5971/**
5972 * iemMemMap worker that deals with a request crossing pages.
5973 */
5974static VBOXSTRICTRC
5975iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5976 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5977{
5978 Assert(cbMem <= GUEST_PAGE_SIZE);
5979
5980 /*
5981 * Do the address translations.
5982 */
5983 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5984 RTGCPHYS GCPhysFirst;
5985 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5986 if (rcStrict != VINF_SUCCESS)
5987 return rcStrict;
5988 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5989
5990 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5991 RTGCPHYS GCPhysSecond;
5992 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5993 cbSecondPage, fAccess, &GCPhysSecond);
5994 if (rcStrict != VINF_SUCCESS)
5995 return rcStrict;
5996 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5997 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5998
5999 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6000
6001 /*
6002 * Read in the current memory content if it's a read, execute or partial
6003 * write access.
6004 */
6005 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6006
6007 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6008 {
6009 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6010 {
6011 /*
6012 * Must carefully deal with access handler status codes here,
6013 * makes the code a bit bloated.
6014 */
6015 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6016 if (rcStrict == VINF_SUCCESS)
6017 {
6018 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6019 if (rcStrict == VINF_SUCCESS)
6020 { /*likely */ }
6021 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6022 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6023 else
6024 {
6025 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6026 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6027 return rcStrict;
6028 }
6029 }
6030 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6031 {
6032 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6033 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6034 {
6035 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6036 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6037 }
6038 else
6039 {
6040 LogEx(LOG_GROUP_IEM,
6041 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6042 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6043 return rcStrict2;
6044 }
6045 }
6046 else
6047 {
6048 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6049 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6050 return rcStrict;
6051 }
6052 }
6053 else
6054 {
6055 /*
6056 * No informational status codes here, much more straightforward.
6057 */
6058 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6059 if (RT_SUCCESS(rc))
6060 {
6061 Assert(rc == VINF_SUCCESS);
6062 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6063 if (RT_SUCCESS(rc))
6064 Assert(rc == VINF_SUCCESS);
6065 else
6066 {
6067 LogEx(LOG_GROUP_IEM,
6068 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6069 return rc;
6070 }
6071 }
6072 else
6073 {
6074 LogEx(LOG_GROUP_IEM,
6075 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6076 return rc;
6077 }
6078 }
6079 }
6080#ifdef VBOX_STRICT
6081 else
6082 memset(pbBuf, 0xcc, cbMem);
6083 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6084 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6085#endif
6086 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6087
6088 /*
6089 * Commit the bounce buffer entry.
6090 */
6091 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6092 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6093 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6094 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6095 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6096 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6097 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6098 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6099 pVCpu->iem.s.cActiveMappings++;
6100
6101 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6102 *ppvMem = pbBuf;
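    /* The unmap info byte packs the mapping table index into bits 2:0, sets
       bit 3 as a validity marker, and stores the IEM_ACCESS_TYPE_MASK part of
       fAccess in bits 7:4; iemMemCommitAndUnmap and friends decode it again. */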
6103 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6104 return VINF_SUCCESS;
6105}
6106
6107
6108/**
6109 * iemMemMap worker that deals with iemMemPageMap failures.
6110 */
6111static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6112 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6113{
6114 /*
6115 * Filter out conditions we can handle and the ones which shouldn't happen.
6116 */
6117 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6118 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6119 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6120 {
6121 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6122 return rcMap;
6123 }
6124 pVCpu->iem.s.cPotentialExits++;
6125
6126 /*
6127 * Read in the current memory content if it's a read, execute or partial
6128 * write access.
6129 */
6130 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6131 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6132 {
6133 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6134 memset(pbBuf, 0xff, cbMem);
6135 else
6136 {
6137 int rc;
6138 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6139 {
6140 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6141 if (rcStrict == VINF_SUCCESS)
6142 { /* nothing */ }
6143 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6144 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6145 else
6146 {
6147 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6148 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6149 return rcStrict;
6150 }
6151 }
6152 else
6153 {
6154 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6155 if (RT_SUCCESS(rc))
6156 { /* likely */ }
6157 else
6158 {
6159 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6160 GCPhysFirst, rc));
6161 return rc;
6162 }
6163 }
6164 }
6165 }
6166#ifdef VBOX_STRICT
6167 else
6168 memset(pbBuf, 0xcc, cbMem);
6171 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6172 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6173#endif
6174
6175 /*
6176 * Commit the bounce buffer entry.
6177 */
6178 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6179 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6180 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6181 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6182 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6183 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6184 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6185 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6186 pVCpu->iem.s.cActiveMappings++;
6187
6188 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6189 *ppvMem = pbBuf;
6190 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6191 return VINF_SUCCESS;
6192}
6193
6194
6195
6196/**
6197 * Maps the specified guest memory for the given kind of access.
6198 *
6199 * This may be using bounce buffering of the memory if it's crossing a page
6200 * boundary or if there is an access handler installed for any of it. Because
6201 * of lock prefix guarantees, we're in for some extra clutter when this
6202 * happens.
6203 *
6204 * This may raise a \#GP, \#SS, \#PF or \#AC.
6205 *
6206 * @returns VBox strict status code.
6207 *
6208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6209 * @param ppvMem Where to return the pointer to the mapped memory.
6210 * @param pbUnmapInfo Where to return unmap info to be passed to
6211 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6212 * done.
6213 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6214 * 8, 12, 16, 32 or 512. When used by string operations
6215 * it can be up to a page.
6216 * @param iSegReg The index of the segment register to use for this
6217 * access. The base and limits are checked. Use UINT8_MAX
6218 * to indicate that no segmentation is required (for IDT,
6219 * GDT and LDT accesses).
6220 * @param GCPtrMem The address of the guest memory.
6221 * @param fAccess How the memory is being accessed. The
6222 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6223 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6224 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6225 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6226 * set.
6227 * @param uAlignCtl Alignment control:
6228 * - Bits 15:0 is the alignment mask.
6229 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6230 * IEM_MEMMAP_F_ALIGN_SSE, and
6231 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6232 * Pass zero to skip alignment.
6233 */
6234VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6235 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6236{
6237 /*
6238 * Check the input and figure out which mapping entry to use.
6239 */
6240 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6241 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6242 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6243 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6244 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6245
6246 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6247 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6248 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6249 {
6250 iMemMap = iemMemMapFindFree(pVCpu);
6251 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6252 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6253 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6254 pVCpu->iem.s.aMemMappings[2].fAccess),
6255 VERR_IEM_IPE_9);
6256 }
6257
6258 /*
6259 * Map the memory, checking that we can actually access it. If something
6260 * slightly complicated happens, fall back on bounce buffering.
6261 */
6262 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6263 if (rcStrict == VINF_SUCCESS)
6264 { /* likely */ }
6265 else
6266 return rcStrict;
6267
6268 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6269 { /* likely */ }
6270 else
6271 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6272
6273 /*
6274 * Alignment check.
6275 */
6276 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6277 { /* likelyish */ }
6278 else
6279 {
6280 /* Misaligned access. */
6281 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6282 {
6283 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6284 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6285 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6286 {
6287 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6288
6289 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6290 return iemRaiseAlignmentCheckException(pVCpu);
6291 }
6292 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6293 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6294 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6295 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6296 * that's what FXSAVE does on a 10980xe. */
6297 && iemMemAreAlignmentChecksEnabled(pVCpu))
6298 return iemRaiseAlignmentCheckException(pVCpu);
6299 else
6300 return iemRaiseGeneralProtectionFault0(pVCpu);
6301 }
6302
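    /* Worked example (illustrative): on AMD64 a 4 byte atomic access where
       (GCPtrMem & 63) == 62 has only 2 bytes left in the assumed 64 byte cache
       line, so the block below rejects it and returns VINF_EM_EMULATE_SPLIT_LOCK. */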
6303#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6304 /* If the access is atomic there are host platform alignment restrictions
6305 we need to conform with. */
6306 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6307# if defined(RT_ARCH_AMD64)
6308 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6309# elif defined(RT_ARCH_ARM64)
6310 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6311# else
6312# error port me
6313# endif
6314 )
6315 { /* okay */ }
6316 else
6317 {
6318 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6319 pVCpu->iem.s.cMisalignedAtomics += 1;
6320 return VINF_EM_EMULATE_SPLIT_LOCK;
6321 }
6322#endif
6323 }
6324
6325#ifdef IEM_WITH_DATA_TLB
6326 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6327
6328 /*
6329 * Get the TLB entry for this page.
6330 */
6331 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6332 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6333 if (pTlbe->uTag == uTag)
6334 {
6335# ifdef VBOX_WITH_STATISTICS
6336 pVCpu->iem.s.DataTlb.cTlbHits++;
6337# endif
6338 }
6339 else
6340 {
6341 pVCpu->iem.s.DataTlb.cTlbMisses++;
6342 PGMPTWALK Walk;
6343 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6344 if (RT_FAILURE(rc))
6345 {
6346 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6347# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6348 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6349 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6350# endif
6351 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6352 }
6353
6354 Assert(Walk.fSucceeded);
6355 pTlbe->uTag = uTag;
6356 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6357 pTlbe->GCPhys = Walk.GCPhys;
6358 pTlbe->pbMappingR3 = NULL;
6359 }
6360
6361 /*
6362 * Check TLB page table level access flags.
6363 */
6364 /* If the page is either supervisor only or non-writable, we need to do
6365 more careful access checks. */
6366 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6367 {
6368 /* Write to read only memory? */
6369 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6370 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6371 && ( ( IEM_GET_CPL(pVCpu) == 3
6372 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6373 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6374 {
6375 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6376# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6377 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6378 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6379# endif
6380 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6381 }
6382
6383 /* Kernel memory accessed by userland? */
6384 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6385 && IEM_GET_CPL(pVCpu) == 3
6386 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6387 {
6388 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6389# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6390 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6391 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6392# endif
6393 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6394 }
6395 }
6396
6397 /*
6398 * Set the dirty / access flags.
6399 * ASSUMES this is set when the address is translated rather than on commit...
6400 */
6401 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6402 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6403 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6404 {
6405 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6406 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6407 AssertRC(rc2);
6408 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6409 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6410 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6411 }
6412
6413 /*
6414 * Look up the physical page info if necessary.
6415 */
6416 uint8_t *pbMem = NULL;
6417 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6418# ifdef IN_RING3
6419 pbMem = pTlbe->pbMappingR3;
6420# else
6421 pbMem = NULL;
6422# endif
6423 else
6424 {
6425 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6426 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6427 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6428 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6429 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6430 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6431 { /* likely */ }
6432 else
6433 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6434 pTlbe->pbMappingR3 = NULL;
6435 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6436 | IEMTLBE_F_NO_MAPPINGR3
6437 | IEMTLBE_F_PG_NO_READ
6438 | IEMTLBE_F_PG_NO_WRITE
6439 | IEMTLBE_F_PG_UNASSIGNED
6440 | IEMTLBE_F_PG_CODE_PAGE);
6441 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6442 &pbMem, &pTlbe->fFlagsAndPhysRev);
6443 AssertRCReturn(rc, rc);
6444# ifdef IN_RING3
6445 pTlbe->pbMappingR3 = pbMem;
6446# endif
6447 }
6448
6449 /*
6450 * Check the physical page level access and mapping.
6451 */
6452 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6453 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6454 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6455 { /* probably likely */ }
6456 else
6457 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6458 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6459 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6460 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6461 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6462 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6463
6464 if (pbMem)
6465 {
6466 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6467 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6468 fAccess |= IEM_ACCESS_NOT_LOCKED;
6469 }
6470 else
6471 {
6472 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6473 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6474 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6475 if (rcStrict != VINF_SUCCESS)
6476 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6477 }
6478
6479 void * const pvMem = pbMem;
6480
6481 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6482 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6483 if (fAccess & IEM_ACCESS_TYPE_READ)
6484 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6485
6486#else /* !IEM_WITH_DATA_TLB */
6487
6488 RTGCPHYS GCPhysFirst;
6489 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6490 if (rcStrict != VINF_SUCCESS)
6491 return rcStrict;
6492
6493 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6494 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6495 if (fAccess & IEM_ACCESS_TYPE_READ)
6496 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6497
6498 void *pvMem;
6499 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6500 if (rcStrict != VINF_SUCCESS)
6501 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6502
6503#endif /* !IEM_WITH_DATA_TLB */
6504
6505 /*
6506 * Fill in the mapping table entry.
6507 */
6508 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6510 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6511 pVCpu->iem.s.cActiveMappings += 1;
6512
6513 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6514 *ppvMem = pvMem;
6515 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6516 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6517 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6518
6519 return VINF_SUCCESS;
6520}
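
/*
 * Illustrative sketch (not part of the build): the typical way a caller pairs
 * iemMemMap with iemMemCommitAndUnmap for a simple 4 byte data read, passing
 * the natural alignment mask as uAlignCtl.  This mirrors the fetch helpers
 * further down (e.g. iemMemFetchDataU32_ZX_U64); the helper name below is
 * made up for the example.
 *
 *     static VBOXSTRICTRC iemExampleFetchU32(PVMCPUCC pVCpu, uint32_t *pu32Dst,
 *                                            uint8_t iSegReg, RTGCPTR GCPtrMem)
 *     {
 *         uint8_t         bUnmapInfo;
 *         uint32_t const *pu32Src;
 *         VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                           iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *         if (rcStrict == VINF_SUCCESS)
 *         {
 *             *pu32Dst = *pu32Src;                // copy out while the page is mapped/locked
 *             rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *         }
 *         return rcStrict;
 *     }
 */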
6521
6522
6523/**
6524 * Commits the guest memory if bounce buffered and unmaps it.
6525 *
6526 * @returns Strict VBox status code.
6527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6528 * @param bUnmapInfo Unmap info set by iemMemMap.
6529 */
6530VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6531{
6532 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6533 AssertMsgReturn( (bUnmapInfo & 0x08)
6534 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6535 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6536 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6537 VERR_NOT_FOUND);
6538
6539 /* If it's bounce buffered, we may need to write back the buffer. */
6540 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6541 {
6542 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6543 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6544 }
6545 /* Otherwise unlock it. */
6546 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6547 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6548
6549 /* Free the entry. */
6550 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6551 Assert(pVCpu->iem.s.cActiveMappings != 0);
6552 pVCpu->iem.s.cActiveMappings--;
6553 return VINF_SUCCESS;
6554}
6555
6556
6557/**
6558 * Rolls back the guest memory (conceptually only) and unmaps it.
6559 *
6560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6561 * @param bUnmapInfo Unmap info set by iemMemMap.
6562 */
6563void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6564{
6565 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6566 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6567 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6568 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6569 == ((unsigned)bUnmapInfo >> 4),
6570 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6571
6572 /* Unlock it if necessary. */
6573 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6574 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6575
6576 /* Free the entry. */
6577 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6578 Assert(pVCpu->iem.s.cActiveMappings != 0);
6579 pVCpu->iem.s.cActiveMappings--;
6580}
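
/*
 * Illustrative sketch (not part of the build): a write mapping that is rolled
 * back when a later step fails, so nothing reaches guest memory.  The
 * IEM_ACCESS_DATA_W constant, uValue and rcOtherStep are assumptions made for
 * the example; only iemMemMap, iemMemCommitAndUnmap and iemMemRollbackAndUnmap
 * are taken from this file.
 *
 *     uint8_t      bUnmapInfo;
 *     uint64_t    *pu64Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Dst, &bUnmapInfo, sizeof(*pu64Dst),
 *                                       iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         if (RT_SUCCESS(rcOtherStep))            // hypothetical follow-up work
 *         {
 *             *pu64Dst = uValue;
 *             rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *         }
 *         else
 *             iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
 *     }
 */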
6581
6582#ifdef IEM_WITH_SETJMP
6583
6584/**
6585 * Maps the specified guest memory for the given kind of access, longjmp on
6586 * error.
6587 *
6588 * This may be using bounce buffering of the memory if it's crossing a page
6589 * boundary or if there is an access handler installed for any of it. Because
6590 * of lock prefix guarantees, we're in for some extra clutter when this
6591 * happens.
6592 *
6593 * This may raise a \#GP, \#SS, \#PF or \#AC.
6594 *
6595 * @returns Pointer to the mapped memory.
6596 *
6597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6598 * @param bUnmapInfo Where to return unmap info to be passed to
6599 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6600 * iemMemCommitAndUnmapWoSafeJmp,
6601 * iemMemCommitAndUnmapRoSafeJmp,
6602 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6603 * when done.
6604 * @param cbMem The number of bytes to map. This is usually 1,
6605 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6606 * string operations it can be up to a page.
6607 * @param iSegReg The index of the segment register to use for
6608 * this access. The base and limits are checked.
6609 * Use UINT8_MAX to indicate that no segmentation
6610 * is required (for IDT, GDT and LDT accesses).
6611 * @param GCPtrMem The address of the guest memory.
6612 * @param fAccess How the memory is being accessed. The
6613 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6614 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6615 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6616 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6617 * set.
6618 * @param uAlignCtl Alignment control:
6619 * - Bits 15:0 is the alignment mask.
6620 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6621 * IEM_MEMMAP_F_ALIGN_SSE, and
6622 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6623 * Pass zero to skip alignment.
6624 */
6625void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6626 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6627{
6628 /*
6629 * Check the input, check segment access and adjust address
6630 * with segment base.
6631 */
6632 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6633 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6634 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6635
6636 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6637 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6638 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6639
6640 /*
6641 * Alignment check.
6642 */
6643 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6644 { /* likelyish */ }
6645 else
6646 {
6647 /* Misaligned access. */
6648 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6649 {
6650 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6651 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6652 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6653 {
6654 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6655
6656 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6657 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6658 }
6659 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6660 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6661 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6662 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6663 * that's what FXSAVE does on a 10980xe. */
6664 && iemMemAreAlignmentChecksEnabled(pVCpu))
6665 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6666 else
6667 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6668 }
6669
6670#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6671 /* If the access is atomic there are host platform alignment restrictions
6672 we need to conform with. */
6673 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6674# if defined(RT_ARCH_AMD64)
6675 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6676# elif defined(RT_ARCH_ARM64)
6677 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6678# else
6679# error port me
6680# endif
6681 )
6682 { /* okay */ }
6683 else
6684 {
6685 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6686 pVCpu->iem.s.cMisalignedAtomics += 1;
6687 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6688 }
6689#endif
6690 }
6691
6692 /*
6693 * Figure out which mapping entry to use.
6694 */
6695 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6696 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6697 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6698 {
6699 iMemMap = iemMemMapFindFree(pVCpu);
6700 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6701 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6702 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6703 pVCpu->iem.s.aMemMappings[2].fAccess),
6704 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6705 }
6706
6707 /*
6708 * Crossing a page boundary?
6709 */
6710 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6711 { /* No (likely). */ }
6712 else
6713 {
6714 void *pvMem;
6715 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6716 if (rcStrict == VINF_SUCCESS)
6717 return pvMem;
6718 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6719 }
6720
6721#ifdef IEM_WITH_DATA_TLB
6722 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6723
6724 /*
6725 * Get the TLB entry for this page.
6726 */
6727 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6728 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6729 if (pTlbe->uTag == uTag)
6730 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6731 else
6732 {
6733 pVCpu->iem.s.DataTlb.cTlbMisses++;
6734 PGMPTWALK Walk;
6735 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6736 if (RT_FAILURE(rc))
6737 {
6738 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6739# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6740 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6741 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6742# endif
6743 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6744 }
6745
6746 Assert(Walk.fSucceeded);
6747 pTlbe->uTag = uTag;
6748 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6749 pTlbe->GCPhys = Walk.GCPhys;
6750 pTlbe->pbMappingR3 = NULL;
6751 }
6752
6753 /*
6754 * Check the flags and physical revision.
6755 */
6756 /** @todo make the caller pass these in with fAccess. */
6757 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6758 ? IEMTLBE_F_PT_NO_USER : 0;
6759 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6760 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6761 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6762 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6763 ? IEMTLBE_F_PT_NO_WRITE : 0)
6764 : 0;
6765 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6766 uint8_t *pbMem = NULL;
6767 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6768 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6769# ifdef IN_RING3
6770 pbMem = pTlbe->pbMappingR3;
6771# else
6772 pbMem = NULL;
6773# endif
6774 else
6775 {
6776 /*
6777 * Okay, something isn't quite right or needs refreshing.
6778 */
6779 /* Write to read only memory? */
6780 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6781 {
6782 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6783# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6784 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6785 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6786# endif
6787 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6788 }
6789
6790 /* Kernel memory accessed by userland? */
6791 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6792 {
6793 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6794# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6795 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6796 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6797# endif
6798 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6799 }
6800
6801 /* Set the dirty / access flags.
6802 ASSUMES this is set when the address is translated rather than on commit... */
6803 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6804 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6805 {
6806 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6807 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6808 AssertRC(rc2);
6809 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6810 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6811 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6812 }
6813
6814 /*
6815 * Check if the physical page info needs updating.
6816 */
6817 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6818# ifdef IN_RING3
6819 pbMem = pTlbe->pbMappingR3;
6820# else
6821 pbMem = NULL;
6822# endif
6823 else
6824 {
6825 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6826 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6827 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6828 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6829 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6830 pTlbe->pbMappingR3 = NULL;
6831 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6832 | IEMTLBE_F_NO_MAPPINGR3
6833 | IEMTLBE_F_PG_NO_READ
6834 | IEMTLBE_F_PG_NO_WRITE
6835 | IEMTLBE_F_PG_UNASSIGNED
6836 | IEMTLBE_F_PG_CODE_PAGE);
6837 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6838 &pbMem, &pTlbe->fFlagsAndPhysRev);
6839 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6840# ifdef IN_RING3
6841 pTlbe->pbMappingR3 = pbMem;
6842# endif
6843 }
6844
6845 /*
6846 * Check the physical page level access and mapping.
6847 */
6848 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6849 { /* probably likely */ }
6850 else
6851 {
6852 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6853 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6854 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6855 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6856 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6857 if (rcStrict == VINF_SUCCESS)
6858 return pbMem;
6859 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6860 }
6861 }
6862 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6863
6864 if (pbMem)
6865 {
6866 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6867 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6868 fAccess |= IEM_ACCESS_NOT_LOCKED;
6869 }
6870 else
6871 {
6872 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6873 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6874 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6875 if (rcStrict == VINF_SUCCESS)
6876 {
6877 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6878 return pbMem;
6879 }
6880 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6881 }
6882
6883 void * const pvMem = pbMem;
6884
6885 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6886 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6887 if (fAccess & IEM_ACCESS_TYPE_READ)
6888 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6889
6890#else /* !IEM_WITH_DATA_TLB */
6891
6892
6893 RTGCPHYS GCPhysFirst;
6894 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6895 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6896 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6897
6898 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6899 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6900 if (fAccess & IEM_ACCESS_TYPE_READ)
6901 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6902
6903 void *pvMem;
6904 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6905 if (rcStrict == VINF_SUCCESS)
6906 { /* likely */ }
6907 else
6908 {
6909 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6910 if (rcStrict == VINF_SUCCESS)
6911 return pvMem;
6912 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6913 }
6914
6915#endif /* !IEM_WITH_DATA_TLB */
6916
6917 /*
6918 * Fill in the mapping table entry.
6919 */
6920 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6921 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6922 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6923 pVCpu->iem.s.cActiveMappings++;
6924
6925 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6926
6927 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6928 return pvMem;
6929}
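
/*
 * Illustrative sketch (not part of the build): on the longjmp path the success
 * case needs no status checks, since any fault already longjmp'ed out; see
 * iemMemFetchDataU128AlignedSseJmp further down for an in-tree user of this
 * pattern.
 *
 *     uint8_t         bUnmapInfo;
 *     uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Src),
 *                                                              iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
 *                                                              sizeof(*pu32Src) - 1);
 *     uint32_t const  uValue  = *pu32Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 */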
6930
6931
6932/**
6933 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6934 *
6935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6936 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp.
6938 */
6939void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6940{
6941 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6942 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6943 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6944 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6945 == ((unsigned)bUnmapInfo >> 4),
6946 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6947
6948 /* If it's bounce buffered, we may need to write back the buffer. */
6949 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6950 {
6951 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6952 {
6953 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6954 if (rcStrict == VINF_SUCCESS)
6955 return;
6956 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6957 }
6958 }
6959 /* Otherwise unlock it. */
6960 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6961 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6962
6963 /* Free the entry. */
6964 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6965 Assert(pVCpu->iem.s.cActiveMappings != 0);
6966 pVCpu->iem.s.cActiveMappings--;
6967}
6968
6969
6970/** Fallback for iemMemCommitAndUnmapRwJmp. */
6971void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6972{
6973 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6974 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6975}
6976
6977
6978/** Fallback for iemMemCommitAndUnmapAtJmp. */
6979void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6980{
6981 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6982 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6983}
6984
6985
6986/** Fallback for iemMemCommitAndUnmapWoJmp. */
6987void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6988{
6989 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6990 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6991}
6992
6993
6994/** Fallback for iemMemCommitAndUnmapRoJmp. */
6995void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6996{
6997 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
6998 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6999}
7000
7001
7002/** Fallback for iemMemRollbackAndUnmapWo. */
7003void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7004{
7005 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7006 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7007}
7008
7009#endif /* IEM_WITH_SETJMP */
7010
7011#ifndef IN_RING3
7012/**
7013 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
7014 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
7015 *
7016 * Allows the instruction to be completed and retired, while the IEM user will
7017 * return to ring-3 immediately afterwards and do the postponed writes there.
7018 *
7019 * @returns VBox status code (no strict statuses). Caller must check
7020 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7022 * @param bUnmapInfo Unmap info set by iemMemMap or iemMemMapJmp.
7024 */
7025VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7026{
7027 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7028 AssertMsgReturn( (bUnmapInfo & 0x08)
7029 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7030 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7031 == ((unsigned)bUnmapInfo >> 4),
7032 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7033 VERR_NOT_FOUND);
7034
7035 /* If it's bounce buffered, we may need to write back the buffer. */
7036 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7037 {
7038 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7039 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7040 }
7041 /* Otherwise unlock it. */
7042 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7043 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7044
7045 /* Free the entry. */
7046 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7047 Assert(pVCpu->iem.s.cActiveMappings != 0);
7048 pVCpu->iem.s.cActiveMappings--;
7049 return VINF_SUCCESS;
7050}
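
/*
 * Illustrative note (not part of the build): a ring-0 caller using the
 * postpone variant must check VMCPU_FF_IEM afterwards before repeating string
 * instructions; the VMCPU_FF_IS_SET macro in this sketch is an assumption
 * about the FF API.
 *
 *     rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *     if (rcStrict == VINF_SUCCESS && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *     {
 *         // no postponed writes, safe to continue iterating in this context
 *     }
 *     else
 *     {
 *         // retire the instruction and return to ring-3 to do the postponed writes
 *     }
 */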
7051#endif
7052
7053
7054/**
7055 * Rolls back mappings, releasing page locks and such.
7056 *
7057 * The caller shall only call this after checking cActiveMappings.
7058 *
7059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7060 */
7061void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7062{
7063 Assert(pVCpu->iem.s.cActiveMappings > 0);
7064
7065 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7066 while (iMemMap-- > 0)
7067 {
7068 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7069 if (fAccess != IEM_ACCESS_INVALID)
7070 {
7071 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7072 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7073 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7074 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7075 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7076 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7077 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7078 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7079 pVCpu->iem.s.cActiveMappings--;
7080 }
7081 }
7082}
7083
7084
7085/*
7086 * Instantiate R/W templates.
7087 */
7088#define TMPL_MEM_WITH_STACK
7089
7090#define TMPL_MEM_TYPE uint8_t
7091#define TMPL_MEM_FN_SUFF U8
7092#define TMPL_MEM_FMT_TYPE "%#04x"
7093#define TMPL_MEM_FMT_DESC "byte"
7094#include "IEMAllMemRWTmpl.cpp.h"
7095
7096#define TMPL_MEM_TYPE uint16_t
7097#define TMPL_MEM_FN_SUFF U16
7098#define TMPL_MEM_FMT_TYPE "%#06x"
7099#define TMPL_MEM_FMT_DESC "word"
7100#include "IEMAllMemRWTmpl.cpp.h"
7101
7102#define TMPL_WITH_PUSH_SREG
7103#define TMPL_MEM_TYPE uint32_t
7104#define TMPL_MEM_FN_SUFF U32
7105#define TMPL_MEM_FMT_TYPE "%#010x"
7106#define TMPL_MEM_FMT_DESC "dword"
7107#include "IEMAllMemRWTmpl.cpp.h"
7108#undef TMPL_WITH_PUSH_SREG
7109
7110#define TMPL_MEM_TYPE uint64_t
7111#define TMPL_MEM_FN_SUFF U64
7112#define TMPL_MEM_FMT_TYPE "%#018RX64"
7113#define TMPL_MEM_FMT_DESC "qword"
7114#include "IEMAllMemRWTmpl.cpp.h"
7115
7116#undef TMPL_MEM_WITH_STACK
7117
7118#define TMPL_MEM_TYPE uint64_t
7119#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7120#define TMPL_MEM_FN_SUFF U64AlignedU128
7121#define TMPL_MEM_FMT_TYPE "%#018RX64"
7122#define TMPL_MEM_FMT_DESC "qword"
7123#include "IEMAllMemRWTmpl.cpp.h"
7124
7125/* See IEMAllMemRWTmplInline.cpp.h */
7126#define TMPL_MEM_BY_REF
7127
7128#define TMPL_MEM_TYPE RTFLOAT80U
7129#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7130#define TMPL_MEM_FN_SUFF R80
7131#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7132#define TMPL_MEM_FMT_DESC "tword"
7133#include "IEMAllMemRWTmpl.cpp.h"
7134
7135#define TMPL_MEM_TYPE RTPBCD80U
7136#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7137#define TMPL_MEM_FN_SUFF D80
7138#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7139#define TMPL_MEM_FMT_DESC "tword"
7140#include "IEMAllMemRWTmpl.cpp.h"
7141
7142#define TMPL_MEM_TYPE RTUINT128U
7143#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7144#define TMPL_MEM_FN_SUFF U128
7145#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7146#define TMPL_MEM_FMT_DESC "dqword"
7147#include "IEMAllMemRWTmpl.cpp.h"
7148
7149#define TMPL_MEM_TYPE RTUINT128U
7150#define TMPL_MEM_TYPE_ALIGN 0
7151#define TMPL_MEM_FN_SUFF U128NoAc
7152#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7153#define TMPL_MEM_FMT_DESC "dqword"
7154#include "IEMAllMemRWTmpl.cpp.h"
7155
7156/**
7157 * Fetches a data dword and zero extends it to a qword.
7158 *
7159 * @returns Strict VBox status code.
7160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7161 * @param pu64Dst Where to return the qword.
7162 * @param iSegReg The index of the segment register to use for
7163 * this access. The base and limits are checked.
7164 * @param GCPtrMem The address of the guest memory.
7165 */
7166VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7167{
7168 /* The lazy approach for now... */
7169 uint8_t bUnmapInfo;
7170 uint32_t const *pu32Src;
7171 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7172 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7173 if (rc == VINF_SUCCESS)
7174 {
7175 *pu64Dst = *pu32Src;
7176 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7177 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7178 }
7179 return rc;
7180}
7181
7182
7183#ifdef SOME_UNUSED_FUNCTION
7184/**
7185 * Fetches a data dword and sign extends it to a qword.
7186 *
7187 * @returns Strict VBox status code.
7188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7189 * @param pu64Dst Where to return the sign extended value.
7190 * @param iSegReg The index of the segment register to use for
7191 * this access. The base and limits are checked.
7192 * @param GCPtrMem The address of the guest memory.
7193 */
7194VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7195{
7196 /* The lazy approach for now... */
7197 uint8_t bUnmapInfo;
7198 int32_t const *pi32Src;
7199 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7200 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7201 if (rc == VINF_SUCCESS)
7202 {
7203 *pu64Dst = *pi32Src;
7204 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7205 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7206 }
7207#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7208 else
7209 *pu64Dst = 0;
7210#endif
7211 return rc;
7212}
7213#endif
7214
7215
7216/**
7217 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7218 * related.
7219 *
7220 * Raises \#GP(0) if not aligned.
7221 *
7222 * @returns Strict VBox status code.
7223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7224 * @param pu128Dst Where to return the dqword.
7225 * @param iSegReg The index of the segment register to use for
7226 * this access. The base and limits are checked.
7227 * @param GCPtrMem The address of the guest memory.
7228 */
7229VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7230{
7231 /* The lazy approach for now... */
7232 uint8_t bUnmapInfo;
7233 PCRTUINT128U pu128Src;
7234 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7235 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7236 if (rc == VINF_SUCCESS)
7237 {
7238 pu128Dst->au64[0] = pu128Src->au64[0];
7239 pu128Dst->au64[1] = pu128Src->au64[1];
7240 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7241 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7242 }
7243 return rc;
7244}
7245
7246
7247#ifdef IEM_WITH_SETJMP
7248/**
7249 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7250 * related, longjmp on error.
7251 *
7252 * Raises \#GP(0) if not aligned.
7253 *
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7255 * @param pu128Dst Where to return the dqword.
7256 * @param iSegReg The index of the segment register to use for
7257 * this access. The base and limits are checked.
7258 * @param GCPtrMem The address of the guest memory.
7259 */
7260void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7261 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7262{
7263 /* The lazy approach for now... */
7264 uint8_t bUnmapInfo;
7265 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7266 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7267 pu128Dst->au64[0] = pu128Src->au64[0];
7268 pu128Dst->au64[1] = pu128Src->au64[1];
7269 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7270 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7271}
7272#endif
7273
7274
7275/**
7276 * Fetches a data oword (octo word), generally AVX related.
7277 *
7278 * @returns Strict VBox status code.
7279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7280 * @param pu256Dst Where to return the qqword.
7281 * @param iSegReg The index of the segment register to use for
7282 * this access. The base and limits are checked.
7283 * @param GCPtrMem The address of the guest memory.
7284 */
7285VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7286{
7287 /* The lazy approach for now... */
7288 uint8_t bUnmapInfo;
7289 PCRTUINT256U pu256Src;
7290 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7291 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7292 if (rc == VINF_SUCCESS)
7293 {
7294 pu256Dst->au64[0] = pu256Src->au64[0];
7295 pu256Dst->au64[1] = pu256Src->au64[1];
7296 pu256Dst->au64[2] = pu256Src->au64[2];
7297 pu256Dst->au64[3] = pu256Src->au64[3];
7298 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7299 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7300 }
7301 return rc;
7302}
7303
7304
7305#ifdef IEM_WITH_SETJMP
7306/**
7307 * Fetches a data oword (octo word), generally AVX related.
7308 *
7309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7310 * @param pu256Dst Where to return the qqword.
7311 * @param iSegReg The index of the segment register to use for
7312 * this access. The base and limits are checked.
7313 * @param GCPtrMem The address of the guest memory.
7314 */
7315void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7316{
7317 /* The lazy approach for now... */
7318 uint8_t bUnmapInfo;
7319 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7320 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7321 pu256Dst->au64[0] = pu256Src->au64[0];
7322 pu256Dst->au64[1] = pu256Src->au64[1];
7323 pu256Dst->au64[2] = pu256Src->au64[2];
7324 pu256Dst->au64[3] = pu256Src->au64[3];
7325 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7326 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7327}
7328#endif
7329
7330
7331/**
7332 * Fetches a data oword (octo word) at an aligned address, generally AVX
7333 * related.
7334 *
7335 * Raises \#GP(0) if not aligned.
7336 *
7337 * @returns Strict VBox status code.
7338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7339 * @param pu256Dst Where to return the qqword.
7340 * @param iSegReg The index of the segment register to use for
7341 * this access. The base and limits are checked.
7342 * @param GCPtrMem The address of the guest memory.
7343 */
7344VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7345{
7346 /* The lazy approach for now... */
7347 uint8_t bUnmapInfo;
7348 PCRTUINT256U pu256Src;
7349 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7350 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7351 if (rc == VINF_SUCCESS)
7352 {
7353 pu256Dst->au64[0] = pu256Src->au64[0];
7354 pu256Dst->au64[1] = pu256Src->au64[1];
7355 pu256Dst->au64[2] = pu256Src->au64[2];
7356 pu256Dst->au64[3] = pu256Src->au64[3];
7357 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7358 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7359 }
7360 return rc;
7361}
7362
7363
7364#ifdef IEM_WITH_SETJMP
7365/**
7366 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7367 * related, longjmp on error.
7368 *
7369 * Raises \#GP(0) if not aligned.
7370 *
7371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7372 * @param pu256Dst Where to return the qqword.
7373 * @param iSegReg The index of the segment register to use for
7374 * this access. The base and limits are checked.
7375 * @param GCPtrMem The address of the guest memory.
7376 */
7377void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7378 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7379{
7380 /* The lazy approach for now... */
7381 uint8_t bUnmapInfo;
7382 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7383 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7384 pu256Dst->au64[0] = pu256Src->au64[0];
7385 pu256Dst->au64[1] = pu256Src->au64[1];
7386 pu256Dst->au64[2] = pu256Src->au64[2];
7387 pu256Dst->au64[3] = pu256Src->au64[3];
7388 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7389 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7390}
7391#endif
7392
7393
7394
7395/**
7396 * Fetches a descriptor register (lgdt, lidt).
7397 *
7398 * @returns Strict VBox status code.
7399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7400 * @param pcbLimit Where to return the limit.
7401 * @param pGCPtrBase Where to return the base.
7402 * @param iSegReg The index of the segment register to use for
7403 * this access. The base and limits are checked.
7404 * @param GCPtrMem The address of the guest memory.
7405 * @param enmOpSize The effective operand size.
7406 */
7407VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7408 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7409{
7410 /*
7411 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7412 * little special:
7413 * - The two reads are done separately.
7414 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7415 * - We suspect the 386 to actually commit the limit before the base in
7416 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7417 * don't try to emulate this eccentric behavior, because it's not well
7418 * enough understood and rather hard to trigger.
7419 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7420 */
7421 VBOXSTRICTRC rcStrict;
7422 if (IEM_IS_64BIT_CODE(pVCpu))
7423 {
7424 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7425 if (rcStrict == VINF_SUCCESS)
7426 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7427 }
7428 else
7429 {
7430 uint32_t uTmp = 0; /* (Zero-initialized to silence Visual C++'s potentially-used-uninitialized warning.) */
7431 if (enmOpSize == IEMMODE_32BIT)
7432 {
7433 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7434 {
7435 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7436 if (rcStrict == VINF_SUCCESS)
7437 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7438 }
7439 else
7440 {
7441 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7442 if (rcStrict == VINF_SUCCESS)
7443 {
7444 *pcbLimit = (uint16_t)uTmp;
7445 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7446 }
7447 }
7448 if (rcStrict == VINF_SUCCESS)
7449 *pGCPtrBase = uTmp;
7450 }
7451 else
7452 {
7453 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7454 if (rcStrict == VINF_SUCCESS)
7455 {
7456 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7457 if (rcStrict == VINF_SUCCESS)
7458 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7459 }
7460 }
7461 }
7462 return rcStrict;
7463}
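
/* For reference, the layout read above (derived from the code itself):
 *     GCPtrMem + 0:  16-bit limit
 *     GCPtrMem + 2:  base - 24 bits used with 16-bit operand size, 32 bits
 *                    with 32-bit operand size, 64 bits in 64-bit code.
 */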
7464
7465
7466/**
7467 * Stores a data dqword, SSE aligned.
7468 *
7469 * @returns Strict VBox status code.
7470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7471 * @param iSegReg The index of the segment register to use for
7472 * this access. The base and limits are checked.
7473 * @param GCPtrMem The address of the guest memory.
7474 * @param u128Value The value to store.
7475 */
7476VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7477{
7478 /* The lazy approach for now... */
7479 uint8_t bUnmapInfo;
7480 PRTUINT128U pu128Dst;
7481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7482 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7483 if (rc == VINF_SUCCESS)
7484 {
7485 pu128Dst->au64[0] = u128Value.au64[0];
7486 pu128Dst->au64[1] = u128Value.au64[1];
7487 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7488 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7489 }
7490 return rc;
7491}
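
/* Illustrative sketch (not part of the IEM sources): a MOVAPS-style store body
 * would use the helper roughly like this, with uSrc (RTUINT128U) and
 * GCPtrEffDst assumed to come from the register fetch and address decoding:
 *
 *     VBOXSTRICTRC rcStrict2 = iemMemStoreDataU128AlignedSse(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEffDst, uSrc);
 *     if (rcStrict2 != VINF_SUCCESS)
 *         return rcStrict2;
 */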
7492
7493
7494#ifdef IEM_WITH_SETJMP
7495/**
7496 * Stores a data dqword, SSE aligned, longjmp on error.
7497 *
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 * @param iSegReg The index of the segment register to use for
7501 * this access. The base and limits are checked.
7502 * @param GCPtrMem The address of the guest memory.
7503 * @param u128Value The value to store.
7504 */
7505void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7506 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7507{
7508 /* The lazy approach for now... */
7509 uint8_t bUnmapInfo;
7510 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7511 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7512 pu128Dst->au64[0] = u128Value.au64[0];
7513 pu128Dst->au64[1] = u128Value.au64[1];
7514 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7515 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7516}
7517#endif
7518
7519
7520/**
7521 * Stores a data qqword.
7522 *
7523 * @returns Strict VBox status code.
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param iSegReg The index of the segment register to use for
7526 * this access. The base and limits are checked.
7527 * @param GCPtrMem The address of the guest memory.
7528 * @param pu256Value Pointer to the value to store.
7529 */
7530VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7531{
7532 /* The lazy approach for now... */
7533 uint8_t bUnmapInfo;
7534 PRTUINT256U pu256Dst;
7535 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7536 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7537 if (rc == VINF_SUCCESS)
7538 {
7539 pu256Dst->au64[0] = pu256Value->au64[0];
7540 pu256Dst->au64[1] = pu256Value->au64[1];
7541 pu256Dst->au64[2] = pu256Value->au64[2];
7542 pu256Dst->au64[3] = pu256Value->au64[3];
7543 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7544 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7545 }
7546 return rc;
7547}
7548
7549
7550#ifdef IEM_WITH_SETJMP
7551/**
7552 * Stores a data qqword, longjmp on error.
7553 *
7554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7555 * @param iSegReg The index of the segment register to use for
7556 * this access. The base and limits are checked.
7557 * @param GCPtrMem The address of the guest memory.
7558 * @param pu256Value Pointer to the value to store.
7559 */
7560void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7561{
7562 /* The lazy approach for now... */
7563 uint8_t bUnmapInfo;
7564 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7565 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7566 pu256Dst->au64[0] = pu256Value->au64[0];
7567 pu256Dst->au64[1] = pu256Value->au64[1];
7568 pu256Dst->au64[2] = pu256Value->au64[2];
7569 pu256Dst->au64[3] = pu256Value->au64[3];
7570 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7571 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7572}
7573#endif
7574
7575
7576/**
7577 * Stores a data qqword.
7578 *
7579 * @returns Strict VBox status code.
7580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7581 * @param iSegReg The index of the segment register to use for
7582 * this access. The base and limits are checked.
7583 * @param GCPtrMem The address of the guest memory.
7584 * @param pu256Value Pointer to the value to store.
7585 */
7586VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7587{
7588 /* The lazy approach for now... */
7589 uint8_t bUnmapInfo;
7590 PRTUINT256U pu256Dst;
7591 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7592 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7593 if (rc == VINF_SUCCESS)
7594 {
7595 pu256Dst->au64[0] = pu256Value->au64[0];
7596 pu256Dst->au64[1] = pu256Value->au64[1];
7597 pu256Dst->au64[2] = pu256Value->au64[2];
7598 pu256Dst->au64[3] = pu256Value->au64[3];
7599 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7600 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7601 }
7602 return rc;
7603}
7604
7605
7606#ifdef IEM_WITH_SETJMP
7607/**
7608 * Stores a data qqword, longjmp on error.
7609 *
7610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7611 * @param iSegReg The index of the segment register to use for
7612 * this access. The base and limits are checked.
7613 * @param GCPtrMem The address of the guest memory.
7614 * @param pu256Value Pointer to the value to store.
7615 */
7616void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7617{
7618 /* The lazy approach for now... */
7619 uint8_t bUnmapInfo;
7620 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7621 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7622 pu256Dst->au64[0] = pu256Value->au64[0];
7623 pu256Dst->au64[1] = pu256Value->au64[1];
7624 pu256Dst->au64[2] = pu256Value->au64[2];
7625 pu256Dst->au64[3] = pu256Value->au64[3];
7626 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7627 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7628}
7629#endif
7630
7631
7632/**
7633 * Stores a data qqword, AVX \#GP(0) aligned.
7634 *
7635 * @returns Strict VBox status code.
7636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7637 * @param iSegReg The index of the segment register to use for
7638 * this access. The base and limits are checked.
7639 * @param GCPtrMem The address of the guest memory.
7640 * @param pu256Value Pointer to the value to store.
7641 */
7642VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7643{
7644 /* The lazy approach for now... */
7645 uint8_t bUnmapInfo;
7646 PRTUINT256U pu256Dst;
7647 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7648 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7649 if (rc == VINF_SUCCESS)
7650 {
7651 pu256Dst->au64[0] = pu256Value->au64[0];
7652 pu256Dst->au64[1] = pu256Value->au64[1];
7653 pu256Dst->au64[2] = pu256Value->au64[2];
7654 pu256Dst->au64[3] = pu256Value->au64[3];
7655 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7656 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7657 }
7658 return rc;
7659}
7660
7661
7662#ifdef IEM_WITH_SETJMP
7663/**
7664 * Stores a data qqword, AVX aligned, longjmp on error.
7665 *
7667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7668 * @param iSegReg The index of the segment register to use for
7669 * this access. The base and limits are checked.
7670 * @param GCPtrMem The address of the guest memory.
7671 * @param pu256Value Pointer to the value to store.
7672 */
7673void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7674 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7675{
7676 /* The lazy approach for now... */
7677 uint8_t bUnmapInfo;
7678 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7679 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7680 pu256Dst->au64[0] = pu256Value->au64[0];
7681 pu256Dst->au64[1] = pu256Value->au64[1];
7682 pu256Dst->au64[2] = pu256Value->au64[2];
7683 pu256Dst->au64[3] = pu256Value->au64[3];
7684 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7685 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7686}
7687#endif
7688
7689
7690/**
7691 * Stores a descriptor register (sgdt, sidt).
7692 *
7693 * @returns Strict VBox status code.
7694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7695 * @param cbLimit The limit.
7696 * @param GCPtrBase The base address.
7697 * @param iSegReg The index of the segment register to use for
7698 * this access. The base and limits are checked.
7699 * @param GCPtrMem The address of the guest memory.
7700 */
7701VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7702{
7703 /*
7704 * The SIDT and SGDT instructions actually store the data using two
7705 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7706 * do not respond to opsize prefixes.
7707 */
7708 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7709 if (rcStrict == VINF_SUCCESS)
7710 {
7711 if (IEM_IS_16BIT_CODE(pVCpu))
7712 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7713 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7714 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7715 else if (IEM_IS_32BIT_CODE(pVCpu))
7716 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7717 else
7718 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7719 }
7720 return rcStrict;
7721}
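
/* Illustrative sketch (not part of the IEM sources): the SGDT implementation
 * feeds this helper straight from the descriptor table register; iEffSeg and
 * GCPtrEffDst are placeholders supplied by the decoder:
 *
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt,
 *                                                 pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
 */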
7722
7723
7724/**
7725 * Begin a special stack push (used by interrupts, exceptions and such).
7726 *
7727 * This will raise \#SS or \#PF if appropriate.
7728 *
7729 * @returns Strict VBox status code.
7730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7731 * @param cbMem The number of bytes to push onto the stack.
7732 * @param cbAlign The alignment mask (7, 3, 1).
7733 * @param ppvMem Where to return the pointer to the stack memory.
7734 * As with the other memory functions this could be
7735 * direct access or bounce buffered access, so
7736 * don't commit the register update until the commit call
7737 * succeeds.
7738 * @param pbUnmapInfo Where to store unmap info for
7739 * iemMemStackPushCommitSpecial.
7740 * @param puNewRsp Where to return the new RSP value. This must be
7741 * passed unchanged to
7742 * iemMemStackPushCommitSpecial().
7743 */
7744VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7745 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7746{
7747 Assert(cbMem < UINT8_MAX);
7748 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7749 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7750}
7751
7752
7753/**
7754 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7755 *
7756 * This will update the rSP.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7760 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7761 * @param uNewRsp The new RSP value returned by
7762 * iemMemStackPushBeginSpecial().
7763 */
7764VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7765{
7766 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7767 if (rcStrict == VINF_SUCCESS)
7768 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7769 return rcStrict;
7770}
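
/* Typical begin/commit pairing (illustrative sketch, error handling trimmed;
 * uErr is a placeholder for whatever the caller wants to push):
 *
 *     uint64_t *pu64Frame;
 *     uint8_t   bUnmapInfo;
 *     uint64_t  uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, (void **)&pu64Frame, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu64Frame = uErr;
 *         rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 *     }
 */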
7771
7772
7773/**
7774 * Begin a special stack pop (used by iret, retf and such).
7775 *
7776 * This will raise \#SS or \#PF if appropriate.
7777 *
7778 * @returns Strict VBox status code.
7779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7780 * @param cbMem The number of bytes to pop from the stack.
7781 * @param cbAlign The alignment mask (7, 3, 1).
7782 * @param ppvMem Where to return the pointer to the stack memory.
7783 * @param pbUnmapInfo Where to store unmap info for
7784 * iemMemStackPopDoneSpecial.
7785 * @param puNewRsp Where to return the new RSP value. This must be
7786 * assigned to CPUMCTX::rsp manually some time
7787 * after iemMemStackPopDoneSpecial() has been
7788 * called.
7789 */
7790VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7791 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7792{
7793 Assert(cbMem < UINT8_MAX);
7794 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7795 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7796}
7797
7798
7799/**
7800 * Continue a special stack pop (used by iret and retf), for the purpose of
7801 * retrieving a new stack pointer.
7802 *
7803 * This will raise \#SS or \#PF if appropriate.
7804 *
7805 * @returns Strict VBox status code.
7806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7807 * @param off Offset from the top of the stack. This is zero
7808 * except in the retf case.
7809 * @param cbMem The number of bytes to pop from the stack.
7810 * @param ppvMem Where to return the pointer to the stack memory.
7811 * @param pbUnmapInfo Where to store unmap info for
7812 * iemMemStackPopDoneSpecial.
7813 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7814 * return this because all use of this function is
7815 * to retrieve a new value and anything we return
7816 * here would be discarded.)
7817 */
7818VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7819 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7820{
7821 Assert(cbMem < UINT8_MAX);
7822
7823 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7824 RTGCPTR GCPtrTop;
7825 if (IEM_IS_64BIT_CODE(pVCpu))
7826 GCPtrTop = uCurNewRsp;
7827 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7828 GCPtrTop = (uint32_t)uCurNewRsp;
7829 else
7830 GCPtrTop = (uint16_t)uCurNewRsp;
7831
7832 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7833 0 /* checked in iemMemStackPopBeginSpecial */);
7834}
7835
7836
7837/**
7838 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7839 * iemMemStackPopContinueSpecial).
7840 *
7841 * The caller will manually commit the rSP.
7842 *
7843 * @returns Strict VBox status code.
7844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7845 * @param bUnmapInfo Unmap information returned by
7846 * iemMemStackPopBeginSpecial() or
7847 * iemMemStackPopContinueSpecial().
7848 */
7849VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7850{
7851 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7852}
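
/* Typical begin/done sequence for iret/retf style pops (illustrative sketch,
 * error handling trimmed; RSP is committed manually by the caller afterwards):
 *
 *     uint64_t const *pu64Frame;
 *     uint8_t         bUnmapInfo;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 16, 7, (void const **)&pu64Frame, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint64_t const uNewRip = pu64Frame[0];
 *         uint64_t const uNewCs  = pu64Frame[1];
 *         rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *         // ... validate uNewCs:uNewRip, then assign pVCpu->cpum.GstCtx.rsp = uNewRsp.
 *     }
 */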
7853
7854
7855/**
7856 * Fetches a system table byte.
7857 *
7858 * @returns Strict VBox status code.
7859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7860 * @param pbDst Where to return the byte.
7861 * @param iSegReg The index of the segment register to use for
7862 * this access. The base and limits are checked.
7863 * @param GCPtrMem The address of the guest memory.
7864 */
7865VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7866{
7867 /* The lazy approach for now... */
7868 uint8_t bUnmapInfo;
7869 uint8_t const *pbSrc;
7870 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7871 if (rc == VINF_SUCCESS)
7872 {
7873 *pbDst = *pbSrc;
7874 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7875 }
7876 return rc;
7877}
7878
7879
7880/**
7881 * Fetches a system table word.
7882 *
7883 * @returns Strict VBox status code.
7884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7885 * @param pu16Dst Where to return the word.
7886 * @param iSegReg The index of the segment register to use for
7887 * this access. The base and limits are checked.
7888 * @param GCPtrMem The address of the guest memory.
7889 */
7890VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7891{
7892 /* The lazy approach for now... */
7893 uint8_t bUnmapInfo;
7894 uint16_t const *pu16Src;
7895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7896 if (rc == VINF_SUCCESS)
7897 {
7898 *pu16Dst = *pu16Src;
7899 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7900 }
7901 return rc;
7902}
7903
7904
7905/**
7906 * Fetches a system table dword.
7907 *
7908 * @returns Strict VBox status code.
7909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7910 * @param pu32Dst Where to return the dword.
7911 * @param iSegReg The index of the segment register to use for
7912 * this access. The base and limits are checked.
7913 * @param GCPtrMem The address of the guest memory.
7914 */
7915VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7916{
7917 /* The lazy approach for now... */
7918 uint8_t bUnmapInfo;
7919 uint32_t const *pu32Src;
7920 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7921 if (rc == VINF_SUCCESS)
7922 {
7923 *pu32Dst = *pu32Src;
7924 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7925 }
7926 return rc;
7927}
7928
7929
7930/**
7931 * Fetches a system table qword.
7932 *
7933 * @returns Strict VBox status code.
7934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7935 * @param pu64Dst Where to return the qword.
7936 * @param iSegReg The index of the segment register to use for
7937 * this access. The base and limits are checked.
7938 * @param GCPtrMem The address of the guest memory.
7939 */
7940VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7941{
7942 /* The lazy approach for now... */
7943 uint8_t bUnmapInfo;
7944 uint64_t const *pu64Src;
7945 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7946 if (rc == VINF_SUCCESS)
7947 {
7948 *pu64Dst = *pu64Src;
7949 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7950 }
7951 return rc;
7952}
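
/* Illustrative sketch (not part of the IEM sources): descriptor and task
 * switching code reads system structures through these helpers, passing
 * UINT8_MAX as iSegReg just like the descriptor fetch code below does
 * (GCPtrDescTable and uSel are placeholders here):
 *
 *     uint64_t u64DescLow;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &u64DescLow, UINT8_MAX, GCPtrDescTable + (uSel & X86_SEL_MASK));
 */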
7953
7954
7955/**
7956 * Fetches a descriptor table entry with caller specified error code.
7957 *
7958 * @returns Strict VBox status code.
7959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7960 * @param pDesc Where to return the descriptor table entry.
7961 * @param uSel The selector which table entry to fetch.
7962 * @param uXcpt The exception to raise on table lookup error.
7963 * @param uErrorCode The error code associated with the exception.
7964 */
7965static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7966 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7967{
7968 AssertPtr(pDesc);
7969 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7970
7971 /** @todo did the 286 require all 8 bytes to be accessible? */
7972 /*
7973 * Get the selector table base and check bounds.
7974 */
7975 RTGCPTR GCPtrBase;
7976 if (uSel & X86_SEL_LDT)
7977 {
7978 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7979 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7980 {
7981 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7982 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7983 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7984 uErrorCode, 0);
7985 }
7986
7987 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7988 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7989 }
7990 else
7991 {
7992 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7993 {
7994 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7995 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7996 uErrorCode, 0);
7997 }
7998 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7999 }
8000
8001 /*
8002 * Read the legacy descriptor and maybe the long mode extensions if
8003 * required.
8004 */
8005 VBOXSTRICTRC rcStrict;
8006 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8007 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8008 else
8009 {
8010 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8011 if (rcStrict == VINF_SUCCESS)
8012 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8013 if (rcStrict == VINF_SUCCESS)
8014 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8015 if (rcStrict == VINF_SUCCESS)
8016 pDesc->Legacy.au16[3] = 0;
8017 else
8018 return rcStrict;
8019 }
8020
8021 if (rcStrict == VINF_SUCCESS)
8022 {
8023 if ( !IEM_IS_LONG_MODE(pVCpu)
8024 || pDesc->Legacy.Gen.u1DescType)
8025 pDesc->Long.au64[1] = 0;
8026 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8027 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8028 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
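            /* Note: (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, so the
               fetch above reads the upper 8 bytes of the 16-byte long mode descriptor. */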
8029 else
8030 {
8031 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8032 /** @todo is this the right exception? */
8033 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8034 }
8035 }
8036 return rcStrict;
8037}
8038
8039
8040/**
8041 * Fetches a descriptor table entry.
8042 *
8043 * @returns Strict VBox status code.
8044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8045 * @param pDesc Where to return the descriptor table entry.
8046 * @param uSel The selector which table entry to fetch.
8047 * @param uXcpt The exception to raise on table lookup error.
8048 */
8049VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8050{
8051 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8052}
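
/* Illustrative sketch (not part of the IEM sources): segment loading code
 * typically fetches and then validates a descriptor like this, uNewSel being
 * the selector that is about to be loaded:
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */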
8053
8054
8055/**
8056 * Marks the selector descriptor as accessed (only non-system descriptors).
8057 *
8058 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8059 * will therefore skip the limit checks.
8060 *
8061 * @returns Strict VBox status code.
8062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8063 * @param uSel The selector.
8064 */
8065VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8066{
8067 /*
8068 * Get the selector table base and calculate the entry address.
8069 */
8070 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8071 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8072 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8073 GCPtr += uSel & X86_SEL_MASK;
8074
8075 /*
8076 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8077 * ugly stuff to avoid this. This will make sure it's an atomic access
8078 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8079 */
8080 VBOXSTRICTRC rcStrict;
8081 uint8_t bUnmapInfo;
8082 uint32_t volatile *pu32;
8083 if ((GCPtr & 3) == 0)
8084 {
8085 /* The normal case, map the 32 bits around the accessed bit (40). */
8086 GCPtr += 2 + 2;
8087 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8088 if (rcStrict != VINF_SUCCESS)
8089 return rcStrict;
8090 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8091 }
8092 else
8093 {
8094 /* The misaligned GDT/LDT case, map the whole thing. */
8095 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8096 if (rcStrict != VINF_SUCCESS)
8097 return rcStrict;
8098 switch ((uintptr_t)pu32 & 3)
8099 {
8100 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8101 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8102 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8103 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8104 }
8105 }
8106
8107 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8108}
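
/* Illustrative sketch (not part of the IEM sources): callers mark the
 * descriptor only after all other checks have passed and only when the
 * accessed bit is still clear, keeping the local copy in sync:
 *
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *     }
 */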
8109
8110
8111#undef LOG_GROUP
8112#define LOG_GROUP LOG_GROUP_IEM
8113
8114/** @} */
8115
8116/** @name Opcode Helpers.
8117 * @{
8118 */
8119
8120/**
8121 * Calculates the effective address of a ModR/M memory operand.
8122 *
8123 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8124 *
8125 * @return Strict VBox status code.
8126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8127 * @param bRm The ModRM byte.
8128 * @param cbImmAndRspOffset - First byte: The size of any immediate
8129 * following the effective address opcode bytes
8130 * (only for RIP relative addressing).
8131 * - Second byte: RSP displacement (for POP [ESP]).
8132 * @param pGCPtrEff Where to return the effective address.
8133 */
8134VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8135{
8136 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8137# define SET_SS_DEF() \
8138 do \
8139 { \
8140 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8141 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8142 } while (0)
8143
8144 if (!IEM_IS_64BIT_CODE(pVCpu))
8145 {
8146/** @todo Check the effective address size crap! */
8147 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8148 {
8149 uint16_t u16EffAddr;
8150
8151 /* Handle the disp16 form with no registers first. */
8152 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8153 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8154 else
8155 {
8156 /* Get the displacement. */
8157 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8158 {
8159 case 0: u16EffAddr = 0; break;
8160 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8161 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8162 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8163 }
8164
8165 /* Add the base and index registers to the disp. */
8166 switch (bRm & X86_MODRM_RM_MASK)
8167 {
8168 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8169 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8170 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8171 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8172 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8173 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8174 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8175 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8176 }
8177 }
8178
8179 *pGCPtrEff = u16EffAddr;
8180 }
8181 else
8182 {
8183 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8184 uint32_t u32EffAddr;
8185
8186 /* Handle the disp32 form with no registers first. */
8187 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8188 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8189 else
8190 {
8191 /* Get the register (or SIB) value. */
8192 switch ((bRm & X86_MODRM_RM_MASK))
8193 {
8194 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8195 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8196 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8197 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8198 case 4: /* SIB */
8199 {
8200 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8201
8202 /* Get the index and scale it. */
8203 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8204 {
8205 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8206 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8207 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8208 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8209 case 4: u32EffAddr = 0; /*none */ break;
8210 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8211 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8212 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8213 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8214 }
8215 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8216
8217 /* add base */
8218 switch (bSib & X86_SIB_BASE_MASK)
8219 {
8220 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8221 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8222 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8223 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8224 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8225 case 5:
8226 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8227 {
8228 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8229 SET_SS_DEF();
8230 }
8231 else
8232 {
8233 uint32_t u32Disp;
8234 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8235 u32EffAddr += u32Disp;
8236 }
8237 break;
8238 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8239 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8240 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8241 }
8242 break;
8243 }
8244 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8245 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8246 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8247 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8248 }
8249
8250 /* Get and add the displacement. */
8251 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8252 {
8253 case 0:
8254 break;
8255 case 1:
8256 {
8257 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8258 u32EffAddr += i8Disp;
8259 break;
8260 }
8261 case 2:
8262 {
8263 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8264 u32EffAddr += u32Disp;
8265 break;
8266 }
8267 default:
8268 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8269 }
8270
8271 }
8272 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8273 *pGCPtrEff = u32EffAddr;
8274 }
8275 }
8276 else
8277 {
8278 uint64_t u64EffAddr;
8279
8280 /* Handle the rip+disp32 form with no registers first. */
8281 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8282 {
8283 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8284 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8285 }
8286 else
8287 {
8288 /* Get the register (or SIB) value. */
8289 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8290 {
8291 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8292 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8293 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8294 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8295 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8296 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8297 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8298 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8299 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8300 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8301 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8302 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8303 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8304 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8305 /* SIB */
8306 case 4:
8307 case 12:
8308 {
8309 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8310
8311 /* Get the index and scale it. */
8312 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8313 {
8314 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8315 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8316 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8317 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8318 case 4: u64EffAddr = 0; /*none */ break;
8319 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8320 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8321 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8322 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8323 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8324 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8325 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8326 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8327 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8328 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8329 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8330 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8331 }
8332 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8333
8334 /* add base */
8335 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8336 {
8337 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8338 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8339 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8340 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8341 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8342 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8343 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8344 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8345 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8346 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8347 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8348 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8349 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8350 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8351 /* complicated encodings */
8352 case 5:
8353 case 13:
8354 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8355 {
8356 if (!pVCpu->iem.s.uRexB)
8357 {
8358 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8359 SET_SS_DEF();
8360 }
8361 else
8362 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8363 }
8364 else
8365 {
8366 uint32_t u32Disp;
8367 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8368 u64EffAddr += (int32_t)u32Disp;
8369 }
8370 break;
8371 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8372 }
8373 break;
8374 }
8375 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8376 }
8377
8378 /* Get and add the displacement. */
8379 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8380 {
8381 case 0:
8382 break;
8383 case 1:
8384 {
8385 int8_t i8Disp;
8386 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8387 u64EffAddr += i8Disp;
8388 break;
8389 }
8390 case 2:
8391 {
8392 uint32_t u32Disp;
8393 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8394 u64EffAddr += (int32_t)u32Disp;
8395 break;
8396 }
8397 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8398 }
8399
8400 }
8401
8402 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8403 *pGCPtrEff = u64EffAddr;
8404 else
8405 {
8406 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8407 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8408 }
8409 }
8410
8411 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8412 return VINF_SUCCESS;
8413}
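
/* Illustrative sketch (not part of the IEM sources): outside the
 * IEM_MC_CALC_RM_EFF_ADDR wrapper the helper would be driven like this, bRm
 * being freshly fetched from the opcode stream and no immediate following:
 *
 *     RTGCPTR GCPtrEff;
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 * Worked 16-bit example: mod=01 rm=110 with disp8 = -2 resolves to BP - 2 and,
 * absent a segment prefix, defaults the effective segment to SS (SET_SS_DEF).
 */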
8414
8415
8416#ifdef IEM_WITH_SETJMP
8417/**
8418 * Calculates the effective address of a ModR/M memory operand.
8419 *
8420 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8421 *
8422 * May longjmp on internal error.
8423 *
8424 * @return The effective address.
8425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8426 * @param bRm The ModRM byte.
8427 * @param cbImmAndRspOffset - First byte: The size of any immediate
8428 * following the effective address opcode bytes
8429 * (only for RIP relative addressing).
8430 * - Second byte: RSP displacement (for POP [ESP]).
8431 */
8432RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8433{
8434 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8435# define SET_SS_DEF() \
8436 do \
8437 { \
8438 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8439 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8440 } while (0)
8441
8442 if (!IEM_IS_64BIT_CODE(pVCpu))
8443 {
8444/** @todo Check the effective address size crap! */
8445 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8446 {
8447 uint16_t u16EffAddr;
8448
8449 /* Handle the disp16 form with no registers first. */
8450 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8451 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8452 else
8453 {
8454 /* Get the displacement. */
8455 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8456 {
8457 case 0: u16EffAddr = 0; break;
8458 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8459 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8460 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8461 }
8462
8463 /* Add the base and index registers to the disp. */
8464 switch (bRm & X86_MODRM_RM_MASK)
8465 {
8466 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8467 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8468 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8469 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8470 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8471 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8472 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8473 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8474 }
8475 }
8476
8477 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8478 return u16EffAddr;
8479 }
8480
8481 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8482 uint32_t u32EffAddr;
8483
8484 /* Handle the disp32 form with no registers first. */
8485 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8486 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8487 else
8488 {
8489 /* Get the register (or SIB) value. */
8490 switch ((bRm & X86_MODRM_RM_MASK))
8491 {
8492 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8493 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8494 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8495 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8496 case 4: /* SIB */
8497 {
8498 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8499
8500 /* Get the index and scale it. */
8501 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8502 {
8503 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8504 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8505 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8506 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8507 case 4: u32EffAddr = 0; /*none */ break;
8508 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8509 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8510 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8511 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8512 }
8513 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8514
8515 /* add base */
8516 switch (bSib & X86_SIB_BASE_MASK)
8517 {
8518 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8519 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8520 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8521 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8522 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8523 case 5:
8524 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8525 {
8526 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8527 SET_SS_DEF();
8528 }
8529 else
8530 {
8531 uint32_t u32Disp;
8532 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8533 u32EffAddr += u32Disp;
8534 }
8535 break;
8536 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8537 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8538 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8539 }
8540 break;
8541 }
8542 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8543 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8544 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8545 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8546 }
8547
8548 /* Get and add the displacement. */
8549 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8550 {
8551 case 0:
8552 break;
8553 case 1:
8554 {
8555 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8556 u32EffAddr += i8Disp;
8557 break;
8558 }
8559 case 2:
8560 {
8561 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8562 u32EffAddr += u32Disp;
8563 break;
8564 }
8565 default:
8566 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8567 }
8568 }
8569
8570 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8571 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8572 return u32EffAddr;
8573 }
8574
8575 uint64_t u64EffAddr;
8576
8577 /* Handle the rip+disp32 form with no registers first. */
8578 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8579 {
8580 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8581 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8582 }
8583 else
8584 {
8585 /* Get the register (or SIB) value. */
8586 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8587 {
8588 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8589 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8590 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8591 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8592 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8593 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8594 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8595 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8596 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8597 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8598 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8599 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8600 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8601 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8602 /* SIB */
8603 case 4:
8604 case 12:
8605 {
8606 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8607
8608 /* Get the index and scale it. */
8609 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8610 {
8611 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8612 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8613 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8614 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8615 case 4: u64EffAddr = 0; /*none */ break;
8616 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8617 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8618 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8619 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8620 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8621 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8622 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8623 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8624 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8625 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8626 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8627 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8628 }
8629 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8630
8631 /* add base */
8632 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8633 {
8634 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8635 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8636 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8637 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8638 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8639 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8640 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8641 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8642 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8643 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8644 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8645 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8646 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8647 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8648 /* complicated encodings */
8649 case 5:
8650 case 13:
8651 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8652 {
8653 if (!pVCpu->iem.s.uRexB)
8654 {
8655 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8656 SET_SS_DEF();
8657 }
8658 else
8659 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8660 }
8661 else
8662 {
8663 uint32_t u32Disp;
8664 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8665 u64EffAddr += (int32_t)u32Disp;
8666 }
8667 break;
8668 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8669 }
8670 break;
8671 }
8672 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8673 }
8674
8675 /* Get and add the displacement. */
8676 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8677 {
8678 case 0:
8679 break;
8680 case 1:
8681 {
8682 int8_t i8Disp;
8683 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8684 u64EffAddr += i8Disp;
8685 break;
8686 }
8687 case 2:
8688 {
8689 uint32_t u32Disp;
8690 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8691 u64EffAddr += (int32_t)u32Disp;
8692 break;
8693 }
8694 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8695 }
8696
8697 }
8698
8699 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8700 {
8701 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8702 return u64EffAddr;
8703 }
8704 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8705 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8706 return u64EffAddr & UINT32_MAX;
8707}
8708#endif /* IEM_WITH_SETJMP */
8709
8710
8711/**
8712 * Calculates the effective address of a ModR/M memory operand, extended version
8713 * for use in the recompilers.
8714 *
8715 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8716 *
8717 * @return Strict VBox status code.
8718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8719 * @param bRm The ModRM byte.
8720 * @param cbImmAndRspOffset - First byte: The size of any immediate
8721 * following the effective address opcode bytes
8722 * (only for RIP relative addressing).
8723 * - Second byte: RSP displacement (for POP [ESP]).
8724 * @param pGCPtrEff Where to return the effective address.
8725 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8726 * SIB byte (bits 39:32).
8727 */
8728VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8729{
8730 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8731# define SET_SS_DEF() \
8732 do \
8733 { \
8734 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8735 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8736 } while (0)
8737
8738 uint64_t uInfo;
8739 if (!IEM_IS_64BIT_CODE(pVCpu))
8740 {
8741/** @todo Check the effective address size crap! */
8742 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8743 {
8744 uint16_t u16EffAddr;
8745
8746 /* Handle the disp16 form with no registers first. */
8747 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8748 {
8749 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8750 uInfo = u16EffAddr;
8751 }
8752 else
8753 {
8754 /* Get the displacement. */
8755 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8756 {
8757 case 0: u16EffAddr = 0; break;
8758 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8759 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8760 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8761 }
8762 uInfo = u16EffAddr;
8763
8764 /* Add the base and index registers to the disp. */
8765 switch (bRm & X86_MODRM_RM_MASK)
8766 {
8767 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8768 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8769 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8770 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8771 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8772 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8773 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8774 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8775 }
8776 }
8777
8778 *pGCPtrEff = u16EffAddr;
8779 }
8780 else
8781 {
8782 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8783 uint32_t u32EffAddr;
8784
8785 /* Handle the disp32 form with no registers first. */
8786 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8787 {
8788 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8789 uInfo = u32EffAddr;
8790 }
8791 else
8792 {
8793 /* Get the register (or SIB) value. */
8794 uInfo = 0;
8795 switch ((bRm & X86_MODRM_RM_MASK))
8796 {
8797 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8798 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8799 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8800 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8801 case 4: /* SIB */
8802 {
8803 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8804 uInfo = (uint64_t)bSib << 32;
8805
8806 /* Get the index and scale it. */
8807 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8808 {
8809 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8810 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8811 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8812 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8813 case 4: u32EffAddr = 0; /*none */ break;
8814 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8815 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8816 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8817 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8818 }
8819 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8820
8821 /* add base */
8822 switch (bSib & X86_SIB_BASE_MASK)
8823 {
8824 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8825 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8826 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8827 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8828 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8829 case 5:
8830 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8831 {
8832 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8833 SET_SS_DEF();
8834 }
8835 else
8836 {
8837 uint32_t u32Disp;
8838 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8839 u32EffAddr += u32Disp;
8840 uInfo |= u32Disp;
8841 }
8842 break;
8843 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8844 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8845 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8846 }
8847 break;
8848 }
8849 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8850 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8851 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8852 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8853 }
8854
8855 /* Get and add the displacement. */
8856 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8857 {
8858 case 0:
8859 break;
8860 case 1:
8861 {
8862 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8863 u32EffAddr += i8Disp;
8864 uInfo |= (uint32_t)(int32_t)i8Disp;
8865 break;
8866 }
8867 case 2:
8868 {
8869 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8870 u32EffAddr += u32Disp;
8871 uInfo |= (uint32_t)u32Disp;
8872 break;
8873 }
8874 default:
8875 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8876 }
8877
8878 }
8879 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8880 *pGCPtrEff = u32EffAddr;
8881 }
8882 }
8883 else
8884 {
8885 uint64_t u64EffAddr;
8886
8887 /* Handle the rip+disp32 form with no registers first. */
8888 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8889 {
8890 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8891 uInfo = (uint32_t)u64EffAddr;
8892 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8893 }
8894 else
8895 {
8896 /* Get the register (or SIB) value. */
8897 uInfo = 0;
8898 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8899 {
8900 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8901 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8902 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8903 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8904 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8905 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8906 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8907 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8908 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8909 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8910 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8911 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8912 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8913 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8914 /* SIB */
8915 case 4:
8916 case 12:
8917 {
8918 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8919 uInfo = (uint64_t)bSib << 32;
8920
8921 /* Get the index and scale it. */
8922 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8923 {
8924 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8925 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8926 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8927 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8928 case 4: u64EffAddr = 0; /*none */ break;
8929 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8930 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8931 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8932 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8933 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8934 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8935 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8936 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8937 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8938 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8939 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8940 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8941 }
8942 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8943
8944 /* add base */
8945 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8946 {
8947 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8948 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8949 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8950 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8951 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8952 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8953 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8954 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8955 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8956 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8957 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8958 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8959 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8960 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8961 /* complicated encodings */
8962 case 5:
8963 case 13:
8964 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8965 {
8966 if (!pVCpu->iem.s.uRexB)
8967 {
8968 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8969 SET_SS_DEF();
8970 }
8971 else
8972 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8973 }
8974 else
8975 {
8976 uint32_t u32Disp;
8977 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8978 u64EffAddr += (int32_t)u32Disp;
8979 uInfo |= u32Disp;
8980 }
8981 break;
8982 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8983 }
8984 break;
8985 }
8986 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8987 }
8988
8989 /* Get and add the displacement. */
8990 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8991 {
8992 case 0:
8993 break;
8994 case 1:
8995 {
8996 int8_t i8Disp;
8997 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8998 u64EffAddr += i8Disp;
8999 uInfo |= (uint32_t)(int32_t)i8Disp;
9000 break;
9001 }
9002 case 2:
9003 {
9004 uint32_t u32Disp;
9005 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9006 u64EffAddr += (int32_t)u32Disp;
9007 uInfo |= u32Disp;
9008 break;
9009 }
9010 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9011 }
9012
9013 }
9014
9015 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9016 *pGCPtrEff = u64EffAddr;
9017 else
9018 {
9019 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9020 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9021 }
9022 }
9023 *puInfo = uInfo;
9024
9025 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9026 return VINF_SUCCESS;
9027}
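
/*
 * Worked example for the decoding above (values are arbitrary and for
 * illustration only):
 *
 *   16-bit form, "mov ax, [bx+si+12h]": ModRM = 0x40 (mod=01, reg=000,
 *   r/m=000). The mod=1 case fetches the sign-extended 8-bit displacement
 *   0x12 and the r/m=0 case adds BX and SI, i.e.
 *       u16EffAddr = 0x0012 + bx + si       (bx=0x1000, si=0x0020 -> 0x1032)
 *
 *   32-bit SIB form, "mov eax, [ebx+ecx*4+8]": ModRM = 0x44 (mod=01,
 *   r/m=100, so a SIB byte follows), SIB = 0x8B (scale=10, index=001=ecx,
 *   base=011=ebx), giving
 *       u32EffAddr = (ecx << 2) + ebx + (int8_t)0x08
 */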
9028
9029/** @} */
9030
9031
9032#ifdef LOG_ENABLED
9033/**
9034 * Logs the current instruction.
9035 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9036 * @param fSameCtx Set if we have the same context information as the VMM,
9037 * clear if we may have already executed an instruction in
9038 * our debug context. When clear, we assume IEMCPU holds
9039 * valid CPU mode info.
9040 *
9041 * The @a fSameCtx parameter is now misleading and obsolete.
9042 * @param pszFunction The IEM function doing the execution.
9043 */
9044static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9045{
9046# ifdef IN_RING3
9047 if (LogIs2Enabled())
9048 {
9049 char szInstr[256];
9050 uint32_t cbInstr = 0;
9051 if (fSameCtx)
9052 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9053 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9054 szInstr, sizeof(szInstr), &cbInstr);
9055 else
9056 {
9057 uint32_t fFlags = 0;
9058 switch (IEM_GET_CPU_MODE(pVCpu))
9059 {
9060 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9061 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9062 case IEMMODE_16BIT:
9063 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9064 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9065 else
9066 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9067 break;
9068 }
9069 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9070 szInstr, sizeof(szInstr), &cbInstr);
9071 }
9072
9073 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9074 Log2(("**** %s fExec=%x\n"
9075 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9076 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9077 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9078 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9079 " %s\n"
9080 , pszFunction, pVCpu->iem.s.fExec,
9081 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9082 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9083 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9084 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9085 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9086 szInstr));
9087
9088 /* This stuff sucks atm. as it fills the log with MSRs. */
9089 //if (LogIs3Enabled())
9090 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9091 }
9092 else
9093# endif
9094 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9095 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9096 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9097}
9098#endif /* LOG_ENABLED */
9099
9100
9101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9102/**
9103 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9104 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9105 *
9106 * @returns Modified rcStrict.
9107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9108 * @param rcStrict The instruction execution status.
9109 */
9110static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9111{
9112 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9113 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9114 {
9115 /* VMX preemption timer takes priority over NMI-window exits. */
9116 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9117 {
9118 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9119 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9120 }
9121 /*
9122 * Check remaining intercepts.
9123 *
9124 * NMI-window and Interrupt-window VM-exits.
9125 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9126 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9127 *
9128 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9129 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9130 */
9131 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9132 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9133 && !TRPMHasTrap(pVCpu))
9134 {
9135 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9136 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9137 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9138 {
9139 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9140 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9141 }
9142 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9143 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9144 {
9145 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9146 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9147 }
9148 }
9149 }
9150 /* TPR-below threshold/APIC write has the highest priority. */
9151 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9152 {
9153 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9154 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9155 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9156 }
9157 /* MTF takes priority over VMX-preemption timer. */
9158 else
9159 {
9160 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9161 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9162 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9163 }
9164 return rcStrict;
9165}
9166#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9167
9168
9169/**
9170 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9171 * IEMExecOneWithPrefetchedByPC.
9172 *
9173 * Similar code is found in IEMExecLots.
9174 *
9175 * @return Strict VBox status code.
9176 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9177 * @param fExecuteInhibit If set, execute the instruction following CLI,
9178 * POP SS and MOV SS,GR.
9179 * @param pszFunction The calling function name.
9180 */
9181DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9182{
9183 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9184 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9185 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9186 RT_NOREF_PV(pszFunction);
9187
9188#ifdef IEM_WITH_SETJMP
9189 VBOXSTRICTRC rcStrict;
9190 IEM_TRY_SETJMP(pVCpu, rcStrict)
9191 {
9192 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9193 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9194 }
9195 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9196 {
9197 pVCpu->iem.s.cLongJumps++;
9198 }
9199 IEM_CATCH_LONGJMP_END(pVCpu);
9200#else
9201 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9202 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9203#endif
9204 if (rcStrict == VINF_SUCCESS)
9205 pVCpu->iem.s.cInstructions++;
9206 if (pVCpu->iem.s.cActiveMappings > 0)
9207 {
9208 Assert(rcStrict != VINF_SUCCESS);
9209 iemMemRollback(pVCpu);
9210 }
9211 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9212 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9213 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9214
9215//#ifdef DEBUG
9216// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9217//#endif
9218
9219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9220 /*
9221 * Perform any VMX nested-guest instruction boundary actions.
9222 *
9223 * If any of these causes a VM-exit, we must skip executing the next
9224 * instruction (would run into stale page tables). A VM-exit makes sure
9225 * there is no interrupt-inhibition, which should ensure we don't go on
9226 * to execute the next instruction. Clearing fExecuteInhibit is
9227 * problematic because of the setjmp/longjmp clobbering above.
9228 */
9229 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9230 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9231 || rcStrict != VINF_SUCCESS)
9232 { /* likely */ }
9233 else
9234 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9235#endif
9236
9237 /* Execute the next instruction as well if a cli, pop ss or
9238 mov ss, Gr has just completed successfully. */
9239 if ( fExecuteInhibit
9240 && rcStrict == VINF_SUCCESS
9241 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9242 {
9243 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9244 if (rcStrict == VINF_SUCCESS)
9245 {
9246#ifdef LOG_ENABLED
9247 iemLogCurInstr(pVCpu, false, pszFunction);
9248#endif
9249#ifdef IEM_WITH_SETJMP
9250 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9251 {
9252 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9253 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9254 }
9255 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9256 {
9257 pVCpu->iem.s.cLongJumps++;
9258 }
9259 IEM_CATCH_LONGJMP_END(pVCpu);
9260#else
9261 IEM_OPCODE_GET_FIRST_U8(&b);
9262 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9263#endif
9264 if (rcStrict == VINF_SUCCESS)
9265 {
9266 pVCpu->iem.s.cInstructions++;
9267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9268 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9269 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9270 { /* likely */ }
9271 else
9272 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9273#endif
9274 }
9275 if (pVCpu->iem.s.cActiveMappings > 0)
9276 {
9277 Assert(rcStrict != VINF_SUCCESS);
9278 iemMemRollback(pVCpu);
9279 }
9280 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9281 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9282 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9283 }
9284 else if (pVCpu->iem.s.cActiveMappings > 0)
9285 iemMemRollback(pVCpu);
9286 /** @todo drop this after we bake this change into RIP advancing. */
9287 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9288 }
9289
9290 /*
9291 * Return value fiddling, statistics and sanity assertions.
9292 */
9293 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9294
9295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9297 return rcStrict;
9298}
9299
9300
9301/**
9302 * Execute one instruction.
9303 *
9304 * @return Strict VBox status code.
9305 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9306 */
9307VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9308{
9309 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9310#ifdef LOG_ENABLED
9311 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9312#endif
9313
9314 /*
9315 * Do the decoding and emulation.
9316 */
9317 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9318 if (rcStrict == VINF_SUCCESS)
9319 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9320 else if (pVCpu->iem.s.cActiveMappings > 0)
9321 iemMemRollback(pVCpu);
9322
9323 if (rcStrict != VINF_SUCCESS)
9324 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9325 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9326 return rcStrict;
9327}
9328
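/*
 * Minimal usage sketch for IEMExecOne (illustrative only): it assumes the
 * caller is the EMT for pVCpu and that the guest state IEM needs has already
 * been imported. The helper name iemExampleStepOnce and the status handling
 * are made up for the example.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleStepOnce(PVMCPUCC pVCpu)
{
    /* Interpret exactly one instruction at the current CS:RIP. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    /* On VINF_SUCCESS the guest state (RIP, RSP, flags, ...) has been
       advanced.  Any other status - informational (I/O, rescheduling) or
       error - must be handed back to the outer execution loop unchanged. */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("iemExampleStepOnce: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif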
9329
9330VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9331{
9332 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9333 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9334 if (rcStrict == VINF_SUCCESS)
9335 {
9336 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9337 if (pcbWritten)
9338 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9339 }
9340 else if (pVCpu->iem.s.cActiveMappings > 0)
9341 iemMemRollback(pVCpu);
9342
9343 return rcStrict;
9344}
9345
9346
9347VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9348 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9349{
9350 VBOXSTRICTRC rcStrict;
9351 if ( cbOpcodeBytes
9352 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9353 {
9354 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9355#ifdef IEM_WITH_CODE_TLB
9356 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9357 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9358 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9359 pVCpu->iem.s.offCurInstrStart = 0;
9360 pVCpu->iem.s.offInstrNextByte = 0;
9361 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9362#else
9363 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9364 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9365#endif
9366 rcStrict = VINF_SUCCESS;
9367 }
9368 else
9369 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9370 if (rcStrict == VINF_SUCCESS)
9371 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9372 else if (pVCpu->iem.s.cActiveMappings > 0)
9373 iemMemRollback(pVCpu);
9374
9375 return rcStrict;
9376}
9377
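/*
 * Usage sketch for IEMExecOneWithPrefetchedByPC (illustrative only): the
 * wrapper name and the pabInstr/cbInstr parameters are hypothetical and
 * stand in for instruction bytes an exit handler may already have at hand.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExamplePrefetched(PVMCPUCC pVCpu, uint8_t const *pabInstr, size_t cbInstr)
{
    /* The supplied bytes are only used when they were read at the current
       guest RIP; otherwise the call falls back to normal opcode prefetching. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, pabInstr, cbInstr);
}
#endif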
9378
9379VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9380{
9381 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9382 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9383 if (rcStrict == VINF_SUCCESS)
9384 {
9385 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9386 if (pcbWritten)
9387 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9388 }
9389 else if (pVCpu->iem.s.cActiveMappings > 0)
9390 iemMemRollback(pVCpu);
9391
9392 return rcStrict;
9393}
9394
9395
9396VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9397 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9398{
9399 VBOXSTRICTRC rcStrict;
9400 if ( cbOpcodeBytes
9401 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9402 {
9403 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9404#ifdef IEM_WITH_CODE_TLB
9405 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9406 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9407 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9408 pVCpu->iem.s.offCurInstrStart = 0;
9409 pVCpu->iem.s.offInstrNextByte = 0;
9410 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9411#else
9412 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9413 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9414#endif
9415 rcStrict = VINF_SUCCESS;
9416 }
9417 else
9418 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9419 if (rcStrict == VINF_SUCCESS)
9420 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9421 else if (pVCpu->iem.s.cActiveMappings > 0)
9422 iemMemRollback(pVCpu);
9423
9424 return rcStrict;
9425}
9426
9427
9428/**
9429 * For handling split cacheline lock operations when the host has split-lock
9430 * detection enabled.
9431 *
9432 * This will cause the interpreter to disregard the lock prefix and implicit
9433 * locking (xchg).
9434 *
9435 * @returns Strict VBox status code.
9436 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9437 */
9438VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9439{
9440 /*
9441 * Do the decoding and emulation.
9442 */
9443 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9444 if (rcStrict == VINF_SUCCESS)
9445 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9446 else if (pVCpu->iem.s.cActiveMappings > 0)
9447 iemMemRollback(pVCpu);
9448
9449 if (rcStrict != VINF_SUCCESS)
9450 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9451 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9452 return rcStrict;
9453}
9454
9455
9456/**
9457 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9458 * inject a pending TRPM trap.
9459 */
9460VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9461{
9462 Assert(TRPMHasTrap(pVCpu));
9463
9464 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9465 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9466 {
9467 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9468#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9469 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9470 if (fIntrEnabled)
9471 {
9472 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9473 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9474 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9475 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9476 else
9477 {
9478 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9479 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9480 }
9481 }
9482#else
9483 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9484#endif
9485 if (fIntrEnabled)
9486 {
9487 uint8_t u8TrapNo;
9488 TRPMEVENT enmType;
9489 uint32_t uErrCode;
9490 RTGCPTR uCr2;
9491 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9492 AssertRC(rc2);
9493 Assert(enmType == TRPM_HARDWARE_INT);
9494 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9495
9496 TRPMResetTrap(pVCpu);
9497
9498#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9499 /* Injecting an event may cause a VM-exit. */
9500 if ( rcStrict != VINF_SUCCESS
9501 && rcStrict != VINF_IEM_RAISED_XCPT)
9502 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9503#else
9504 NOREF(rcStrict);
9505#endif
9506 }
9507 }
9508
9509 return VINF_SUCCESS;
9510}
9511
9512
9513VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9514{
9515 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9516 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9517 Assert(cMaxInstructions > 0);
9518
9519 /*
9520 * See if there is an interrupt pending in TRPM, inject it if we can.
9521 */
9522 /** @todo What if we are injecting an exception and not an interrupt? Is that
9523 * possible here? For now we assert it is indeed only an interrupt. */
9524 if (!TRPMHasTrap(pVCpu))
9525 { /* likely */ }
9526 else
9527 {
9528 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9529 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9530 { /*likely */ }
9531 else
9532 return rcStrict;
9533 }
9534
9535 /*
9536 * Initial decoder init w/ prefetch, then setup setjmp.
9537 */
9538 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9539 if (rcStrict == VINF_SUCCESS)
9540 {
9541#ifdef IEM_WITH_SETJMP
9542 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9543 IEM_TRY_SETJMP(pVCpu, rcStrict)
9544#endif
9545 {
9546 /*
9547 * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
9548 */
9549 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9550 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9551 for (;;)
9552 {
9553 /*
9554 * Log the state.
9555 */
9556#ifdef LOG_ENABLED
9557 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9558#endif
9559
9560 /*
9561 * Do the decoding and emulation.
9562 */
9563 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9564 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9565#ifdef VBOX_STRICT
9566 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9567#endif
9568 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9569 {
9570 Assert(pVCpu->iem.s.cActiveMappings == 0);
9571 pVCpu->iem.s.cInstructions++;
9572
9573#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9574 /* Perform any VMX nested-guest instruction boundary actions. */
9575 uint64_t fCpu = pVCpu->fLocalForcedActions;
9576 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9577 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9578 { /* likely */ }
9579 else
9580 {
9581 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9582 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9583 fCpu = pVCpu->fLocalForcedActions;
9584 else
9585 {
9586 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9587 break;
9588 }
9589 }
9590#endif
9591 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9592 {
9593#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9594 uint64_t fCpu = pVCpu->fLocalForcedActions;
9595#endif
9596 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9597 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9598 | VMCPU_FF_TLB_FLUSH
9599 | VMCPU_FF_UNHALT );
9600
9601 if (RT_LIKELY( ( !fCpu
9602 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9603 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9604 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9605 {
9606 if (--cMaxInstructionsGccStupidity > 0)
9607 {
9608 /* Poll timers every now and then according to the caller's specs. */
9609 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9610 || !TMTimerPollBool(pVM, pVCpu))
9611 {
9612 Assert(pVCpu->iem.s.cActiveMappings == 0);
9613 iemReInitDecoder(pVCpu);
9614 continue;
9615 }
9616 }
9617 }
9618 }
9619 Assert(pVCpu->iem.s.cActiveMappings == 0);
9620 }
9621 else if (pVCpu->iem.s.cActiveMappings > 0)
9622 iemMemRollback(pVCpu);
9623 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9624 break;
9625 }
9626 }
9627#ifdef IEM_WITH_SETJMP
9628 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9629 {
9630 if (pVCpu->iem.s.cActiveMappings > 0)
9631 iemMemRollback(pVCpu);
9632# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9633 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9634# endif
9635 pVCpu->iem.s.cLongJumps++;
9636 }
9637 IEM_CATCH_LONGJMP_END(pVCpu);
9638#endif
9639
9640 /*
9641 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9642 */
9643 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9644 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9645 }
9646 else
9647 {
9648 if (pVCpu->iem.s.cActiveMappings > 0)
9649 iemMemRollback(pVCpu);
9650
9651#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9652 /*
9653 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9654 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9655 */
9656 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9657#endif
9658 }
9659
9660 /*
9661 * Maybe re-enter raw-mode and log.
9662 */
9663 if (rcStrict != VINF_SUCCESS)
9664 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9665 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9666 if (pcInstructions)
9667 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9668 return rcStrict;
9669}
9670
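/*
 * Usage sketch for IEMExecLots (illustrative only): the instruction budget
 * and poll rate below are arbitrary example values; note that the assertion
 * at the top of the function requires cPollRate + 1 to be a power of two
 * (e.g. 511, 1023, 2047). The helper name is made up.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Execute up to 4096 instructions, polling timers roughly every 512. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("iemExampleExecLots: %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif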
9671
9672/**
9673 * Interface used by EMExecuteExec, does exit statistics and limits.
9674 *
9675 * @returns Strict VBox status code.
9676 * @param pVCpu The cross context virtual CPU structure.
9677 * @param fWillExit To be defined.
9678 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9679 * @param cMaxInstructions Maximum number of instructions to execute.
9680 * @param cMaxInstructionsWithoutExits
9681 * The max number of instructions without exits.
9682 * @param pStats Where to return statistics.
9683 */
9684VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9685 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9686{
9687 NOREF(fWillExit); /** @todo define flexible exit crits */
9688
9689 /*
9690 * Initialize return stats.
9691 */
9692 pStats->cInstructions = 0;
9693 pStats->cExits = 0;
9694 pStats->cMaxExitDistance = 0;
9695 pStats->cReserved = 0;
9696
9697 /*
9698 * Initial decoder init w/ prefetch, then setup setjmp.
9699 */
9700 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9701 if (rcStrict == VINF_SUCCESS)
9702 {
9703#ifdef IEM_WITH_SETJMP
9704 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9705 IEM_TRY_SETJMP(pVCpu, rcStrict)
9706#endif
9707 {
9708#ifdef IN_RING0
9709 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9710#endif
9711 uint32_t cInstructionSinceLastExit = 0;
9712
9713 /*
9714 * The run loop. We limit ourselves to the caller-specified instruction limits.
9715 */
9716 PVM pVM = pVCpu->CTX_SUFF(pVM);
9717 for (;;)
9718 {
9719 /*
9720 * Log the state.
9721 */
9722#ifdef LOG_ENABLED
9723 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9724#endif
9725
9726 /*
9727 * Do the decoding and emulation.
9728 */
9729 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9730
9731 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9732 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9733
9734 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9735 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9736 {
9737 pStats->cExits += 1;
9738 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9739 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9740 cInstructionSinceLastExit = 0;
9741 }
9742
9743 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9744 {
9745 Assert(pVCpu->iem.s.cActiveMappings == 0);
9746 pVCpu->iem.s.cInstructions++;
9747 pStats->cInstructions++;
9748 cInstructionSinceLastExit++;
9749
9750#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9751 /* Perform any VMX nested-guest instruction boundary actions. */
9752 uint64_t fCpu = pVCpu->fLocalForcedActions;
9753 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9754 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9755 { /* likely */ }
9756 else
9757 {
9758 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9759 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9760 fCpu = pVCpu->fLocalForcedActions;
9761 else
9762 {
9763 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9764 break;
9765 }
9766 }
9767#endif
9768 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9769 {
9770#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9771 uint64_t fCpu = pVCpu->fLocalForcedActions;
9772#endif
9773 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9774 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9775 | VMCPU_FF_TLB_FLUSH
9776 | VMCPU_FF_UNHALT );
9777 if (RT_LIKELY( ( ( !fCpu
9778 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9779 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9780 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9781 || pStats->cInstructions < cMinInstructions))
9782 {
9783 if (pStats->cInstructions < cMaxInstructions)
9784 {
9785 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9786 {
9787#ifdef IN_RING0
9788 if ( !fCheckPreemptionPending
9789 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9790#endif
9791 {
9792 Assert(pVCpu->iem.s.cActiveMappings == 0);
9793 iemReInitDecoder(pVCpu);
9794 continue;
9795 }
9796#ifdef IN_RING0
9797 rcStrict = VINF_EM_RAW_INTERRUPT;
9798 break;
9799#endif
9800 }
9801 }
9802 }
9803 Assert(!(fCpu & VMCPU_FF_IEM));
9804 }
9805 Assert(pVCpu->iem.s.cActiveMappings == 0);
9806 }
9807 else if (pVCpu->iem.s.cActiveMappings > 0)
9808 iemMemRollback(pVCpu);
9809 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9810 break;
9811 }
9812 }
9813#ifdef IEM_WITH_SETJMP
9814 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9815 {
9816 if (pVCpu->iem.s.cActiveMappings > 0)
9817 iemMemRollback(pVCpu);
9818 pVCpu->iem.s.cLongJumps++;
9819 }
9820 IEM_CATCH_LONGJMP_END(pVCpu);
9821#endif
9822
9823 /*
9824 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9825 */
9826 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9827 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9828 }
9829 else
9830 {
9831 if (pVCpu->iem.s.cActiveMappings > 0)
9832 iemMemRollback(pVCpu);
9833
9834#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9835 /*
9836 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9837 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9838 */
9839 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9840#endif
9841 }
9842
9843 /*
9844 * Maybe re-enter raw-mode and log.
9845 */
9846 if (rcStrict != VINF_SUCCESS)
9847 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9848 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9849 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9850 return rcStrict;
9851}
9852
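/*
 * Usage sketch for IEMExecForExits (illustrative only): the limits are
 * arbitrary example values, the helper name is made up, and the stats type
 * behind PIEMEXECFOREXITSTATS is assumed to be IEMEXECFOREXITSTATS with the
 * fields the function fills in above.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 2048 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("iemExampleExecForExits: ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif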
9853
9854/**
9855 * Injects a trap, fault, abort, software interrupt or external interrupt.
9856 *
9857 * The parameter list matches TRPMQueryTrapAll pretty closely.
9858 *
9859 * @returns Strict VBox status code.
9860 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9861 * @param u8TrapNo The trap number.
9862 * @param enmType What type is it (trap/fault/abort), software
9863 * interrupt or hardware interrupt.
9864 * @param uErrCode The error code if applicable.
9865 * @param uCr2 The CR2 value if applicable.
9866 * @param cbInstr The instruction length (only relevant for
9867 * software interrupts).
9868 */
9869VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9870 uint8_t cbInstr)
9871{
9872 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9873#ifdef DBGFTRACE_ENABLED
9874 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9875 u8TrapNo, enmType, uErrCode, uCr2);
9876#endif
9877
9878 uint32_t fFlags;
9879 switch (enmType)
9880 {
9881 case TRPM_HARDWARE_INT:
9882 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9883 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9884 uErrCode = uCr2 = 0;
9885 break;
9886
9887 case TRPM_SOFTWARE_INT:
9888 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9889 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9890 uErrCode = uCr2 = 0;
9891 break;
9892
9893 case TRPM_TRAP:
9894 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9895 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9897 if (u8TrapNo == X86_XCPT_PF)
9898 fFlags |= IEM_XCPT_FLAGS_CR2;
9899 switch (u8TrapNo)
9900 {
9901 case X86_XCPT_DF:
9902 case X86_XCPT_TS:
9903 case X86_XCPT_NP:
9904 case X86_XCPT_SS:
9905 case X86_XCPT_PF:
9906 case X86_XCPT_AC:
9907 case X86_XCPT_GP:
9908 fFlags |= IEM_XCPT_FLAGS_ERR;
9909 break;
9910 }
9911 break;
9912
9913 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9914 }
9915
9916 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9917
9918 if (pVCpu->iem.s.cActiveMappings > 0)
9919 iemMemRollback(pVCpu);
9920
9921 return rcStrict;
9922}
9923
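/*
 * Usage sketch for IEMInjectTrap (illustrative only): injecting a guest page
 * fault with an arbitrary error code and fault address. For TRPM_TRAP with
 * vector X86_XCPT_PF the code above attaches both the error code and the CR2
 * value to the event. The helper name is made up.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* cbInstr is only relevant for software interrupts, so pass zero here. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif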
9924
9925/**
9926 * Injects the active TRPM event.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure.
9930 */
9931VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9932{
9933#ifndef IEM_IMPLEMENTS_TASKSWITCH
9934 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9935#else
9936 uint8_t u8TrapNo;
9937 TRPMEVENT enmType;
9938 uint32_t uErrCode;
9939 RTGCUINTPTR uCr2;
9940 uint8_t cbInstr;
9941 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9942 if (RT_FAILURE(rc))
9943 return rc;
9944
9945 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9946 * ICEBP \#DB injection as a special case. */
9947 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9948#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9949 if (rcStrict == VINF_SVM_VMEXIT)
9950 rcStrict = VINF_SUCCESS;
9951#endif
9952#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9953 if (rcStrict == VINF_VMX_VMEXIT)
9954 rcStrict = VINF_SUCCESS;
9955#endif
9956 /** @todo Are there any other codes that imply the event was successfully
9957 * delivered to the guest? See @bugref{6607}. */
9958 if ( rcStrict == VINF_SUCCESS
9959 || rcStrict == VINF_IEM_RAISED_XCPT)
9960 TRPMResetTrap(pVCpu);
9961
9962 return rcStrict;
9963#endif
9964}
9965
9966
9967VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9968{
9969 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9970 return VERR_NOT_IMPLEMENTED;
9971}
9972
9973
9974VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9975{
9976 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9977 return VERR_NOT_IMPLEMENTED;
9978}
9979
9980
9981/**
9982 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9983 *
9984 * This API ASSUMES that the caller has already verified that the guest code is
9985 * allowed to access the I/O port. (The I/O port is in the DX register in the
9986 * guest state.)
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure.
9990 * @param cbValue The size of the I/O port access (1, 2, or 4).
9991 * @param enmAddrMode The addressing mode.
9992 * @param fRepPrefix Indicates whether a repeat prefix is used
9993 * (doesn't matter which for this instruction).
9994 * @param cbInstr The instruction length in bytes.
9995 * @param iEffSeg The effective segment register number.
9996 * @param fIoChecked Whether the access to the I/O port has been
9997 * checked or not. It's typically checked in the
9998 * HM scenario.
9999 */
10000VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10001 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10002{
10003 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10005
10006 /*
10007 * State init.
10008 */
10009 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10010
10011 /*
10012 * Switch orgy for getting to the right handler.
10013 */
10014 VBOXSTRICTRC rcStrict;
10015 if (fRepPrefix)
10016 {
10017 switch (enmAddrMode)
10018 {
10019 case IEMMODE_16BIT:
10020 switch (cbValue)
10021 {
10022 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10023 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10024 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10025 default:
10026 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10027 }
10028 break;
10029
10030 case IEMMODE_32BIT:
10031 switch (cbValue)
10032 {
10033 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10034 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10035 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10036 default:
10037 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10038 }
10039 break;
10040
10041 case IEMMODE_64BIT:
10042 switch (cbValue)
10043 {
10044 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10045 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10046 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10047 default:
10048 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10049 }
10050 break;
10051
10052 default:
10053 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10054 }
10055 }
10056 else
10057 {
10058 switch (enmAddrMode)
10059 {
10060 case IEMMODE_16BIT:
10061 switch (cbValue)
10062 {
10063 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10064 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10065 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10066 default:
10067 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10068 }
10069 break;
10070
10071 case IEMMODE_32BIT:
10072 switch (cbValue)
10073 {
10074 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10075 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10076 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10077 default:
10078 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10079 }
10080 break;
10081
10082 case IEMMODE_64BIT:
10083 switch (cbValue)
10084 {
10085 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10086 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10087 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10088 default:
10089 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10090 }
10091 break;
10092
10093 default:
10094 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10095 }
10096 }
10097
10098 if (pVCpu->iem.s.cActiveMappings)
10099 iemMemRollback(pVCpu);
10100
10101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10102}
10103
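/*
 * Usage sketch for IEMExecStringIoWrite (illustrative only): the operand
 * size, addressing mode and segment are example values a VM-exit handler
 * might have decoded; the port itself comes from guest DX as documented
 * above. The helper name is made up.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* "rep outsb" with a 32-bit address size and the default DS segment;
       fIoChecked=true means the caller already validated I/O permissions. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif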
10104
10105/**
10106 * Interface for HM and EM for executing string I/O IN (read) instructions.
10107 *
10108 * This API ASSUMES that the caller has already verified that the guest code is
10109 * allowed to access the I/O port. (The I/O port is in the DX register in the
10110 * guest state.)
10111 *
10112 * @returns Strict VBox status code.
10113 * @param pVCpu The cross context virtual CPU structure.
10114 * @param cbValue The size of the I/O port access (1, 2, or 4).
10115 * @param enmAddrMode The addressing mode.
10116 * @param fRepPrefix Indicates whether a repeat prefix is used
10117 * (doesn't matter which for this instruction).
10118 * @param cbInstr The instruction length in bytes.
10119 * @param fIoChecked Whether the access to the I/O port has been
10120 * checked or not. It's typically checked in the
10121 * HM scenario.
10122 */
10123VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10124 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10125{
10126 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10127
10128 /*
10129 * State init.
10130 */
10131 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10132
10133 /*
10134 * Switch orgy for getting to the right handler.
10135 */
10136 VBOXSTRICTRC rcStrict;
10137 if (fRepPrefix)
10138 {
10139 switch (enmAddrMode)
10140 {
10141 case IEMMODE_16BIT:
10142 switch (cbValue)
10143 {
10144 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10145 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10146 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10147 default:
10148 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10149 }
10150 break;
10151
10152 case IEMMODE_32BIT:
10153 switch (cbValue)
10154 {
10155 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10156 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10157 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10158 default:
10159 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10160 }
10161 break;
10162
10163 case IEMMODE_64BIT:
10164 switch (cbValue)
10165 {
10166 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10167 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10168 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10169 default:
10170 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10171 }
10172 break;
10173
10174 default:
10175 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10176 }
10177 }
10178 else
10179 {
10180 switch (enmAddrMode)
10181 {
10182 case IEMMODE_16BIT:
10183 switch (cbValue)
10184 {
10185 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10186 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10187 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10188 default:
10189 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10190 }
10191 break;
10192
10193 case IEMMODE_32BIT:
10194 switch (cbValue)
10195 {
10196 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10197 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10198 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10199 default:
10200 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10201 }
10202 break;
10203
10204 case IEMMODE_64BIT:
10205 switch (cbValue)
10206 {
10207 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10208 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10209 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10210 default:
10211 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10212 }
10213 break;
10214
10215 default:
10216 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10217 }
10218 }
10219
10220 if ( pVCpu->iem.s.cActiveMappings == 0
10221 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10222 { /* likely */ }
10223 else
10224 {
10225 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10226 iemMemRollback(pVCpu);
10227 }
10228 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10229}
10230
10231
10232/**
10233 * Interface for rawmode to execute an OUT (write) instruction.
10234 *
10235 * @returns Strict VBox status code.
10236 * @param pVCpu The cross context virtual CPU structure.
10237 * @param cbInstr The instruction length in bytes.
10238 * @param u16Port The port to write to.
10239 * @param fImm Whether the port is specified using an immediate operand or
10240 * using the implicit DX register.
10241 * @param cbReg The register size.
10242 *
10243 * @remarks In ring-0 not all of the state needs to be synced in.
10244 */
10245VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10246{
10247 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10248 Assert(cbReg <= 4 && cbReg != 3);
10249
10250 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10251 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10252 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10253 Assert(!pVCpu->iem.s.cActiveMappings);
10254 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10255}
10256
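/*
 * Usage sketch for IEMExecDecodedOut (illustrative only): emulating a trapped
 * "out dx, al" (1-byte register operand, port taken from guest DX, no
 * immediate); the instruction length is whatever the caller decoded. The
 * helper name is made up.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleOutDxAl(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
    return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif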
10257
10258/**
10259 * Interface for rawmode to execute an IN (read) instruction.
10260 *
10261 * @returns Strict VBox status code.
10262 * @param pVCpu The cross context virtual CPU structure.
10263 * @param cbInstr The instruction length in bytes.
10264 * @param u16Port The port to read.
10265 * @param fImm Whether the port is specified using an immediate operand or
10266 * using the implicit DX.
10267 * @param cbReg The register size.
10268 */
10269VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10270{
10271 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10272 Assert(cbReg <= 4 && cbReg != 3);
10273
10274 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10275 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10276 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10277 Assert(!pVCpu->iem.s.cActiveMappings);
10278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10279}
10280
10281
10282/**
10283 * Interface for HM and EM to write to a CRx register.
10284 *
10285 * @returns Strict VBox status code.
10286 * @param pVCpu The cross context virtual CPU structure.
10287 * @param cbInstr The instruction length in bytes.
10288 * @param iCrReg The control register number (destination).
10289 * @param iGReg The general purpose register number (source).
10290 *
10291 * @remarks In ring-0 not all of the state needs to be synced in.
10292 */
10293VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10294{
10295 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10296 Assert(iCrReg < 16);
10297 Assert(iGReg < 16);
10298
10299 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10300 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10301 Assert(!pVCpu->iem.s.cActiveMappings);
10302 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10303}
10304
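/*
 * Usage sketch for IEMExecDecodedMovCRxWrite (illustrative only): emulating a
 * trapped "mov cr4, rax", i.e. control register 4 written from GPR 0 (RAX);
 * the register numbers follow the usual x86 GPR encoding. The helper name is
 * made up.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleMovToCr4(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 4 /*iCrReg=CR4*/, 0 /*iGReg=RAX*/);
}
#endif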
10305
10306/**
10307 * Interface for HM and EM to read from a CRx register.
10308 *
10309 * @returns Strict VBox status code.
10310 * @param pVCpu The cross context virtual CPU structure.
10311 * @param cbInstr The instruction length in bytes.
10312 * @param iGReg The general purpose register number (destination).
10313 * @param iCrReg The control register number (source).
10314 *
10315 * @remarks In ring-0 not all of the state needs to be synced in.
10316 */
10317VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10318{
10319 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10320 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10321 | CPUMCTX_EXTRN_APIC_TPR);
10322 Assert(iCrReg < 16);
10323 Assert(iGReg < 16);
10324
10325 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10327 Assert(!pVCpu->iem.s.cActiveMappings);
10328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10329}
10330
10331
10332/**
10333 * Interface for HM and EM to write to a DRx register.
10334 *
10335 * @returns Strict VBox status code.
10336 * @param pVCpu The cross context virtual CPU structure.
10337 * @param cbInstr The instruction length in bytes.
10338 * @param iDrReg The debug register number (destination).
10339 * @param iGReg The general purpose register number (source).
10340 *
10341 * @remarks In ring-0 not all of the state needs to be synced in.
10342 */
10343VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10344{
10345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10346 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10347 Assert(iDrReg < 8);
10348 Assert(iGReg < 16);
10349
10350 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10352 Assert(!pVCpu->iem.s.cActiveMappings);
10353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10354}
10355
10356
10357/**
10358 * Interface for HM and EM to read from a DRx register.
10359 *
10360 * @returns Strict VBox status code.
10361 * @param pVCpu The cross context virtual CPU structure.
10362 * @param cbInstr The instruction length in bytes.
10363 * @param iGReg The general purpose register number (destination).
10364 * @param iDrReg The debug register number (source).
10365 *
10366 * @remarks In ring-0 not all of the state needs to be synced in.
10367 */
10368VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10369{
10370 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10371 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10372 Assert(iDrReg < 8);
10373 Assert(iGReg < 16);
10374
10375 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10376 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10377 Assert(!pVCpu->iem.s.cActiveMappings);
10378 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10379}
10380
10381
10382/**
10383 * Interface for HM and EM to clear the CR0[TS] bit.
10384 *
10385 * @returns Strict VBox status code.
10386 * @param pVCpu The cross context virtual CPU structure.
10387 * @param cbInstr The instruction length in bytes.
10388 *
10389 * @remarks In ring-0 not all of the state needs to be synced in.
10390 */
10391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10392{
10393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10394
10395 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10396 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10397 Assert(!pVCpu->iem.s.cActiveMappings);
10398 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10399}
10400
10401
10402/**
10403 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10404 *
10405 * @returns Strict VBox status code.
10406 * @param pVCpu The cross context virtual CPU structure.
10407 * @param cbInstr The instruction length in bytes.
10408 * @param uValue The value to load into CR0.
10409 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10410 * memory operand. Otherwise pass NIL_RTGCPTR.
10411 *
10412 * @remarks In ring-0 not all of the state needs to be synced in.
10413 */
10414VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10415{
10416 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10417
10418 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10419 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10420 Assert(!pVCpu->iem.s.cActiveMappings);
10421 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10422}
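/*
 * Illustrative only, not part of the original sources: a minimal sketch of
 * calling the LMSW interface for the register form of the instruction, where
 * there is no memory operand and NIL_RTGCPTR is passed for GCPtrEffDst.  The
 * handler name is hypothetical.
 *
 * @code
 *  static VBOXSTRICTRC hmExampleHandleLmswReg(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uMsw)
 *  {
 *      // Register operand: no guest-linear address to report, so pass NIL_RTGCPTR.
 *      return IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, NIL_RTGCPTR);
 *  }
 * @endcode
 */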
10423
10424
10425/**
10426 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10427 *
10428 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10429 *
10430 * @returns Strict VBox status code.
10431 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10432 * @param cbInstr The instruction length in bytes.
10433 * @remarks In ring-0 not all of the state needs to be synced in.
10434 * @thread EMT(pVCpu)
10435 */
10436VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10437{
10438 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10439
10440 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10441 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10442 Assert(!pVCpu->iem.s.cActiveMappings);
10443 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10444}
10445
10446
10447/**
10448 * Interface for HM and EM to emulate the WBINVD instruction.
10449 *
10450 * @returns Strict VBox status code.
10451 * @param pVCpu The cross context virtual CPU structure.
10452 * @param cbInstr The instruction length in bytes.
10453 *
10454 * @remarks In ring-0 not all of the state needs to be synced in.
10455 */
10456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10457{
10458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10459
10460 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10461 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10462 Assert(!pVCpu->iem.s.cActiveMappings);
10463 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10464}
10465
10466
10467/**
10468 * Interface for HM and EM to emulate the INVD instruction.
10469 *
10470 * @returns Strict VBox status code.
10471 * @param pVCpu The cross context virtual CPU structure.
10472 * @param cbInstr The instruction length in bytes.
10473 *
10474 * @remarks In ring-0 not all of the state needs to be synced in.
10475 */
10476VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10477{
10478 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10479
10480 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10481 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10482 Assert(!pVCpu->iem.s.cActiveMappings);
10483 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10484}
10485
10486
10487/**
10488 * Interface for HM and EM to emulate the INVLPG instruction.
10489 *
10490 * @returns Strict VBox status code.
10491 * @retval VINF_PGM_SYNC_CR3
10492 *
10493 * @param pVCpu The cross context virtual CPU structure.
10494 * @param cbInstr The instruction length in bytes.
10495 * @param GCPtrPage The effective address of the page to invalidate.
10496 *
10497 * @remarks In ring-0 not all of the state needs to be synced in.
10498 */
10499VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10500{
10501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10505 Assert(!pVCpu->iem.s.cActiveMappings);
10506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10507}
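/*
 * Illustrative only, not part of the original sources: a minimal sketch of an
 * INVLPG exit handler that forwards the page address to IEM and lets the
 * VINF_PGM_SYNC_CR3 status bubble up so the caller can resync the shadow
 * paging structures.  The handler name is hypothetical.
 *
 * @code
 *  static VBOXSTRICTRC hmExampleHandleInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
 *  {
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *      // VINF_PGM_SYNC_CR3 (see the @retval above) must be returned to the
 *      // caller rather than swallowed, so the pending CR3 sync gets handled.
 *      return rcStrict;
 *  }
 * @endcode
 */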
10508
10509
10510/**
10511 * Interface for HM and EM to emulate the INVPCID instruction.
10512 *
10513 * @returns Strict VBox status code.
10514 * @retval VINF_PGM_SYNC_CR3
10515 *
10516 * @param pVCpu The cross context virtual CPU structure.
10517 * @param cbInstr The instruction length in bytes.
10518 * @param iEffSeg The effective segment register.
10519 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10520 * @param uType The invalidation type.
10521 *
10522 * @remarks In ring-0 not all of the state needs to be synced in.
10523 */
10524VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10525 uint64_t uType)
10526{
10527 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10528
10529 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10530 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10531 Assert(!pVCpu->iem.s.cActiveMappings);
10532 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10533}
10534
10535
10536/**
10537 * Interface for HM and EM to emulate the CPUID instruction.
10538 *
10539 * @returns Strict VBox status code.
10540 *
10541 * @param pVCpu The cross context virtual CPU structure.
10542 * @param cbInstr The instruction length in bytes.
10543 *
10544 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10545 */
10546VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10547{
10548 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10549 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10550
10551 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10552 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10553 Assert(!pVCpu->iem.s.cActiveMappings);
10554 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10555}
10556
10557
10558/**
10559 * Interface for HM and EM to emulate the RDPMC instruction.
10560 *
10561 * @returns Strict VBox status code.
10562 *
10563 * @param pVCpu The cross context virtual CPU structure.
10564 * @param cbInstr The instruction length in bytes.
10565 *
10566 * @remarks Not all of the state needs to be synced in.
10567 */
10568VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10569{
10570 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10571 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10572
10573 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10575 Assert(!pVCpu->iem.s.cActiveMappings);
10576 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10577}
10578
10579
10580/**
10581 * Interface for HM and EM to emulate the RDTSC instruction.
10582 *
10583 * @returns Strict VBox status code.
10584 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10585 *
10586 * @param pVCpu The cross context virtual CPU structure.
10587 * @param cbInstr The instruction length in bytes.
10588 *
10589 * @remarks Not all of the state needs to be synced in.
10590 */
10591VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10592{
10593 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10594 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10595
10596 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10598 Assert(!pVCpu->iem.s.cActiveMappings);
10599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10600}
10601
10602
10603/**
10604 * Interface for HM and EM to emulate the RDTSCP instruction.
10605 *
10606 * @returns Strict VBox status code.
10607 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10608 *
10609 * @param pVCpu The cross context virtual CPU structure.
10610 * @param cbInstr The instruction length in bytes.
10611 *
10612 * @remarks Not all of the state needs to be synced in.  It is recommended
10613 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10614 */
10615VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10616{
10617 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10618 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10619
10620 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10621 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10622 Assert(!pVCpu->iem.s.cActiveMappings);
10623 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10624}
10625
10626
10627/**
10628 * Interface for HM and EM to emulate the RDMSR instruction.
10629 *
10630 * @returns Strict VBox status code.
10631 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10632 *
10633 * @param pVCpu The cross context virtual CPU structure.
10634 * @param cbInstr The instruction length in bytes.
10635 *
10636 * @remarks Not all of the state needs to be synced in. Requires RCX and
10637 * (currently) all MSRs.
10638 */
10639VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10640{
10641 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10642 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10643
10644 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10645 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10646 Assert(!pVCpu->iem.s.cActiveMappings);
10647 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10648}
10649
10650
10651/**
10652 * Interface for HM and EM to emulate the WRMSR instruction.
10653 *
10654 * @returns Strict VBox status code.
10655 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10656 *
10657 * @param pVCpu The cross context virtual CPU structure.
10658 * @param cbInstr The instruction length in bytes.
10659 *
10660 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10661 * and (currently) all MSRs.
10662 */
10663VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10664{
10665 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10666 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10667 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10668
10669 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10670 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10671 Assert(!pVCpu->iem.s.cActiveMappings);
10672 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10673}
10674
10675
10676/**
10677 * Interface for HM and EM to emulate the MONITOR instruction.
10678 *
10679 * @returns Strict VBox status code.
10680 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10681 *
10682 * @param pVCpu The cross context virtual CPU structure.
10683 * @param cbInstr The instruction length in bytes.
10684 *
10685 * @remarks Not all of the state needs to be synced in.
10686 * @remarks ASSUMES the default DS segment and that no segment override prefixes
10687 *          are used.
10688 */
10689VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10690{
10691 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10692 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10693
10694 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10695 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10696 Assert(!pVCpu->iem.s.cActiveMappings);
10697 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10698}
10699
10700
10701/**
10702 * Interface for HM and EM to emulate the MWAIT instruction.
10703 *
10704 * @returns Strict VBox status code.
10705 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10706 *
10707 * @param pVCpu The cross context virtual CPU structure.
10708 * @param cbInstr The instruction length in bytes.
10709 *
10710 * @remarks Not all of the state needs to be synced in.
10711 */
10712VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10713{
10714 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10715 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10716
10717 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10719 Assert(!pVCpu->iem.s.cActiveMappings);
10720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10721}
10722
10723
10724/**
10725 * Interface for HM and EM to emulate the HLT instruction.
10726 *
10727 * @returns Strict VBox status code.
10728 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10729 *
10730 * @param pVCpu The cross context virtual CPU structure.
10731 * @param cbInstr The instruction length in bytes.
10732 *
10733 * @remarks Not all of the state needs to be synced in.
10734 */
10735VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10736{
10737 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10738
10739 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10740 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10741 Assert(!pVCpu->iem.s.cActiveMappings);
10742 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10743}
10744
10745
10746/**
10747 * Checks if IEM is in the process of delivering an event (interrupt or
10748 * exception).
10749 *
10750 * @returns true if we're in the process of raising an interrupt or exception,
10751 * false otherwise.
10752 * @param pVCpu The cross context virtual CPU structure.
10753 * @param puVector Where to store the vector associated with the
10754 * currently delivered event, optional.
10755 * @param pfFlags Where to store the event delivery flags (see
10756 * IEM_XCPT_FLAGS_XXX), optional.
10757 * @param puErr Where to store the error code associated with the
10758 * event, optional.
10759 * @param puCr2 Where to store the CR2 associated with the event,
10760 * optional.
10761 * @remarks The caller should check the flags to determine if the error code and
10762 * CR2 are valid for the event.
10763 */
10764VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10765{
10766 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10767 if (fRaisingXcpt)
10768 {
10769 if (puVector)
10770 *puVector = pVCpu->iem.s.uCurXcpt;
10771 if (pfFlags)
10772 *pfFlags = pVCpu->iem.s.fCurXcpt;
10773 if (puErr)
10774 *puErr = pVCpu->iem.s.uCurXcptErr;
10775 if (puCr2)
10776 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10777 }
10778 return fRaisingXcpt;
10779}
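/*
 * Illustrative only, not part of the original sources: a minimal sketch of how
 * a caller might query the event currently being delivered.  Which of the
 * IEM_XCPT_FLAGS_XXX bits indicate error-code and CR2 validity is an
 * assumption here; check the flag definitions before relying on them.
 *
 * @code
 *  uint8_t  uVector;
 *  uint32_t fFlags;
 *  uint32_t uErr;
 *  uint64_t uCr2;
 *  if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *  {
 *      // uVector and fFlags are always valid here; uErr and uCr2 are only
 *      // meaningful if the corresponding flag bits say so.
 *      Log(("Delivering vector %#x (flags %#x)\n", uVector, fFlags));
 *  }
 * @endcode
 */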
10780
10781#ifdef IN_RING3
10782
10783/**
10784 * Handles the unlikely and probably fatal merge cases.
10785 *
10786 * @returns Merged status code.
10787 * @param rcStrict Current EM status code.
10788 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10789 * with @a rcStrict.
10790 * @param iMemMap The memory mapping index. For error reporting only.
10791 * @param pVCpu The cross context virtual CPU structure of the calling
10792 * thread, for error reporting only.
10793 */
10794DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10795 unsigned iMemMap, PVMCPUCC pVCpu)
10796{
10797 if (RT_FAILURE_NP(rcStrict))
10798 return rcStrict;
10799
10800 if (RT_FAILURE_NP(rcStrictCommit))
10801 return rcStrictCommit;
10802
10803 if (rcStrict == rcStrictCommit)
10804 return rcStrictCommit;
10805
10806 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10807 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10808 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10811 return VERR_IOM_FF_STATUS_IPE;
10812}
10813
10814
10815/**
10816 * Helper for IOMR3ProcessForceFlag.
10817 *
10818 * @returns Merged status code.
10819 * @param rcStrict Current EM status code.
10820 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10821 * with @a rcStrict.
10822 * @param iMemMap The memory mapping index. For error reporting only.
10823 * @param pVCpu The cross context virtual CPU structure of the calling
10824 * thread, for error reporting only.
10825 */
10826DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10827{
10828 /* Simple. */
10829 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10830 return rcStrictCommit;
10831
10832 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10833 return rcStrict;
10834
10835 /* EM scheduling status codes. */
10836 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10837 && rcStrict <= VINF_EM_LAST))
10838 {
10839 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10840 && rcStrictCommit <= VINF_EM_LAST))
10841 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10842 }
10843
10844 /* Unlikely */
10845 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10846}
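/*
 * Illustrative only: a few merge outcomes that follow directly from the
 * branches above, to make the priority rule easier to see.
 *
 *   rcStrict = VINF_SUCCESS,       rcStrictCommit = VINF_EM_RESCHEDULE  => VINF_EM_RESCHEDULE
 *   rcStrict = VINF_EM_RAW_TO_R3,  rcStrictCommit = VINF_SUCCESS        => VINF_SUCCESS
 *   rcStrict = VINF_EM_RESCHEDULE, rcStrictCommit = VINF_EM_RESCHEDULE  => VINF_EM_RESCHEDULE
 *
 * When both are EM scheduling codes the numerically smaller (higher priority)
 * one wins; any other combination ends up in iemR3MergeStatusSlow above.
 */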
10847
10848
10849/**
10850 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10851 *
10852 * @returns Merge between @a rcStrict and what the commit operation returned.
10853 * @param pVM The cross context VM structure.
10854 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10855 * @param rcStrict The status code returned by ring-0 or raw-mode.
10856 */
10857VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10858{
10859 /*
10860 * Reset the pending commit.
10861 */
10862 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10863 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10864 ("%#x %#x %#x\n",
10865 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10866 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10867
10868 /*
10869 * Commit the pending bounce buffers (usually just one).
10870 */
10871 unsigned cBufs = 0;
10872 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10873 while (iMemMap-- > 0)
10874 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10875 {
10876 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10877 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10878 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10879
10880 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10881 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10882 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10883
10884 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10885 {
10886 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10888 pbBuf,
10889 cbFirst,
10890 PGMACCESSORIGIN_IEM);
10891 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10892 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10893 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10894 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10895 }
10896
10897 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10898 {
10899 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10901 pbBuf + cbFirst,
10902 cbSecond,
10903 PGMACCESSORIGIN_IEM);
10904 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10905 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10906 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10907 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10908 }
10909 cBufs++;
10910 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10911 }
10912
10913 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10914 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10915 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10916 pVCpu->iem.s.cActiveMappings = 0;
10917 return rcStrict;
10918}
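/*
 * Illustrative only, not part of the original sources: a minimal sketch of how
 * the ring-3 EM loop might react to VMCPU_FF_IEM after returning from ring-0.
 * The surrounding force-flag check is shown purely as an assumption about the
 * caller; only the IEMR3ProcessForceFlag call itself comes from this file.
 *
 * @code
 *  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */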
10919
10920#endif /* IN_RING3 */
10921